Merge tag 'irqchip-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm...
author: Thomas Gleixner <tglx@linutronix.de>
Mon, 10 Jan 2022 12:55:41 +0000 (13:55 +0100)
committer: Thomas Gleixner <tglx@linutronix.de>
Mon, 10 Jan 2022 12:55:41 +0000 (13:55 +0100)
Pull irqchip updates from Marc Zyngier:

 - Fix GICv3 redistributor table reservation with RT across kexec

 - Fix GICv4.1 redistributor view of the VPE table across kexec

 - Add support for extra interrupts on spear-shirq

 - Make obtaining some interrupts optional for the Renesas drivers

 - Various cleanups and bug fixes

Link: https://lore.kernel.org/lkml/20220108130807.4109738-1-maz@kernel.org
894 files changed:
.mailmap
Documentation/admin-guide/blockdev/drbd/figures.rst
Documentation/admin-guide/blockdev/drbd/node-states-8.dot [deleted file]
Documentation/admin-guide/blockdev/drbd/peer-states-8.dot [new file with mode: 0644]
Documentation/admin-guide/kernel-parameters.txt
Documentation/conf.py
Documentation/devicetree/bindings/i2c/apple,i2c.yaml
Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
Documentation/devicetree/bindings/input/gpio-keys.yaml
Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
Documentation/devicetree/bindings/net/ethernet-phy.yaml
Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
Documentation/devicetree/bindings/power/supply/bq25980.yaml
Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
Documentation/devicetree/bindings/spi/spi-rockchip.yaml
Documentation/i2c/summary.rst
Documentation/locking/locktypes.rst
Documentation/networking/bonding.rst
Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/timestamping.rst
Documentation/process/changes.rst
Documentation/process/submitting-patches.rst
Documentation/sound/hd-audio/models.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/imx6qdl-wandboard.dtsi
arch/arm/boot/dts/imx6qp-prtwd3.dts
arch/arm/boot/dts/imx6ull-pinfunc.h
arch/arm/boot/dts/ls1021a-tsn.dts
arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
arch/arm/boot/dts/socfpga_arria5_socdk.dts
arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
arch/arm/include/asm/efi.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/head-nommu.S
arch/arm/mach-rockchip/platsmp.c
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts
arch/arm64/boot/dts/apple/t8103-j274.dts
arch/arm64/boot/dts/apple/t8103.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts
arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
arch/arm64/boot/dts/freescale/imx8mq.dtsi
arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
arch/arm64/include/asm/efi.h
arch/arm64/kernel/machine_kexec_file.c
arch/csky/kernel/traps.c
arch/mips/include/asm/mach-ralink/spaces.h
arch/mips/include/asm/pci.h
arch/mips/net/bpf_jit_comp.h
arch/mips/pci/pci-generic.c
arch/parisc/Kconfig
arch/parisc/include/asm/futex.h
arch/parisc/kernel/syscall.S
arch/parisc/kernel/traps.c
arch/powerpc/kernel/module_64.c
arch/powerpc/mm/ptdump/ptdump.c
arch/powerpc/platforms/85xx/smp.c
arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
arch/riscv/include/asm/efi.h
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/kernel/ftrace.c
arch/s390/kernel/irq.c
arch/s390/kernel/machine_kexec_file.c
arch/x86/Kconfig
arch/x86/include/asm/efi.h
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/pkru.h
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/debugfs.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_iter.c
arch/x86/kvm/mmu/tdp_iter.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/net/bpf_jit_comp.c
arch/x86/platform/efi/quirks.c
arch/x86/tools/relocs.c
block/blk-iocost.c
block/fops.c
block/ioprio.c
drivers/Makefile
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/ata/ahci_ceva.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/auxdisplay/charlcd.c
drivers/base/power/main.c
drivers/block/xen-blkfront.c
drivers/bus/mhi/core/pm.c
drivers/bus/mhi/pci_generic.c
drivers/bus/sunxi-rsb.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_ssif.c
drivers/clk/clk.c
drivers/clk/imx/clk-imx8qxp-lpcg.c
drivers/clk/imx/clk-imx8qxp.c
drivers/clk/qcom/clk-alpha-pll.c
drivers/clk/qcom/clk-regmap-mux.c
drivers/clk/qcom/common.c
drivers/clk/qcom/common.h
drivers/clk/qcom/gcc-sm6125.c
drivers/clk/versatile/clk-icst.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/dw_apb_timer_of.c
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
drivers/dma/dw-edma/dw-edma-pcie.c
drivers/dma/idxd/irq.c
drivers/dma/idxd/submit.c
drivers/dma/st_fdma.c
drivers/dma/ti/k3-udma.c
drivers/edac/i10nm_base.c
drivers/firmware/scpi_pm_domain.c
drivers/firmware/tegra/bpmp-debugfs.c
drivers/gpio/gpio-aspeed-sgpio.c
drivers/gpio/gpio-dln2.c
drivers/gpio/gpio-virtio.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
drivers/gpu/drm/amd/include/discovery.h
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/i915/display/intel_dmc.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/intel_gtt.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/lima/lima_device.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/msm/msm_gem_shrinker.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/tiny/simpledrm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/hid/Kconfig
drivers/hid/hid-asus.c
drivers/hid/hid-bigbenff.c
drivers/hid/hid-chicony.c
drivers/hid/hid-corsair.c
drivers/hid/hid-elan.c
drivers/hid/hid-elo.c
drivers/hid/hid-ft260.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-holtek-kbd.c
drivers/hid/hid-holtek-mouse.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-lg.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-prodikeys.c
drivers/hid/hid-quirks.c
drivers/hid/hid-roccat-arvo.c
drivers/hid/hid-roccat-isku.c
drivers/hid/hid-roccat-kone.c
drivers/hid/hid-roccat-koneplus.c
drivers/hid/hid-roccat-konepure.c
drivers/hid/hid-roccat-kovaplus.c
drivers/hid/hid-roccat-lua.c
drivers/hid/hid-roccat-pyra.c
drivers/hid/hid-roccat-ryos.c
drivers/hid/hid-roccat-savu.c
drivers/hid/hid-samsung.c
drivers/hid/hid-sony.c
drivers/hid/hid-thrustmaster.c
drivers/hid/hid-u2fzero.c
drivers/hid/hid-uclogic-core.c
drivers/hid/hid-uclogic-params.c
drivers/hid/hid-vivaldi.c
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/wacom_sys.c
drivers/hv/Kconfig
drivers/hwmon/corsair-psu.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/lm90.c
drivers/hwmon/nct6775.c
drivers/hwmon/pwm-fan.c
drivers/hwmon/sht4x.c
drivers/i2c/busses/i2c-mpc.c
drivers/i2c/busses/i2c-virtio.c
drivers/i2c/i2c-dev.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/accel/kxsd9.c
drivers/iio/accel/mma8452.c
drivers/iio/adc/Kconfig
drivers/iio/adc/ad7768-1.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/adc/axp20x_adc.c
drivers/iio/adc/dln2-adc.c
drivers/iio/adc/stm32-adc.c
drivers/iio/gyro/adxrs290.c
drivers/iio/gyro/itg3200_buffer.c
drivers/iio/industrialio-trigger.c
drivers/iio/light/ltr501.c
drivers/iio/light/stk3310.c
drivers/iio/trigger/stm32-timer-trigger.c
drivers/infiniband/core/uverbs_marshall.c
drivers/infiniband/core/uverbs_uapi.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/driver.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
drivers/infiniband/hw/hns/hns_roce_srq.c
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/main.h
drivers/infiniband/hw/irdma/pble.c
drivers/infiniband/hw/irdma/pble.h
drivers/infiniband/hw/irdma/utils.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/irdma/verbs.h
drivers/infiniband/hw/qib/qib_user_sdma.c
drivers/infiniband/sw/rxe/rxe_mr.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
drivers/input/joystick/spaceball.c
drivers/input/misc/iqs626a.c
drivers/input/mouse/appletouch.c
drivers/input/mouse/elantech.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/elants_i2c.c
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/goodix.h
drivers/input/touchscreen/goodix_fwupload.c
drivers/irqchip/irq-apple-aic.c
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-aspeed-scu-ic.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-nvic.c
drivers/isdn/mISDN/core.c
drivers/isdn/mISDN/core.h
drivers/isdn/mISDN/layer1.c
drivers/mailbox/bcm-flexrm-mailbox.c
drivers/md/bcache/super.c
drivers/md/dm-integrity.c
drivers/md/md.c
drivers/md/persistent-data/dm-btree-remove.c
drivers/md/raid1.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/eeprom/at25.c
drivers/misc/fastrpc.c
drivers/mmc/core/core.c
drivers/mmc/core/core.h
drivers/mmc/core/host.c
drivers/mmc/host/meson-mx-sdhc-mmc.c
drivers/mmc/host/mmci_stm32_sdmmc.c
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-tegra.c
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/nand/raw/Kconfig
drivers/mtd/nand/raw/fsmc_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_options.c
drivers/net/can/kvaser_pciefd.c
drivers/net/can/m_can/m_can.c
drivers/net/can/m_can/m_can.h
drivers/net/can/m_can/m_can_pci.c
drivers/net/can/pch_can.c
drivers/net/can/sja1000/ems_pcmcia.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/mv88e6xxx/serdes.c
drivers/net/dsa/ocelot/felix.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/atheros/ag71xx.c
drivers/net/ethernet/broadcom/bcm4908_enet.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fman/fman_port.c
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/google/gve/gve_adminq.c
drivers/net/ethernet/google/gve/gve_utils.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/huawei/hinic/hinic_rx.c
drivers/net/ethernet/huawei/hinic/hinic_sriov.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_dcb_nl.c
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
drivers/net/ethernet/intel/ice/ice_fdir.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_ptp.h
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/igc/igc_i225.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/lantiq_xrx200.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
drivers/net/ethernet/marvell/prestera/prestera_main.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/health.h
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/micrel/ks8851_par.c
drivers/net/ethernet/microsoft/mana/hw_channel.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/sfc/ef100_nic.c
drivers/net/ethernet/sfc/falcon/rx.c
drivers/net/ethernet/sfc/rx_common.c
drivers/net/ethernet/smsc/smc911x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/fjes/fjes_main.c
drivers/net/hamradio/mkiss.c
drivers/net/ieee802154/atusb.c
drivers/net/netdevsim/bpf.c
drivers/net/netdevsim/ethtool.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/phylink.c
drivers/net/tun.c
drivers/net/usb/asix_common.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/lan78xx.c
drivers/net/usb/pegasus.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/rndis_host.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vrf.c
drivers/net/wireless/ath/ath11k/mhi.c
drivers/net/wireless/broadcom/brcm80211/Kconfig
drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h
drivers/net/wireless/intel/iwlegacy/Kconfig
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/mediatek/mt76/Makefile
drivers/net/wwan/iosm/iosm_ipc_imem.c
drivers/net/wwan/iosm/iosm_ipc_imem.h
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/rx.c
drivers/net/xen-netfront.c
drivers/nfc/st21nfca/i2c.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/zns.c
drivers/nvme/target/tcp.c
drivers/of/irq.c
drivers/pci/controller/Kconfig
drivers/pci/controller/dwc/pci-exynos.c
drivers/pci/controller/dwc/pcie-qcom-ep.c
drivers/pci/controller/pci-aardvark.c
drivers/pci/controller/pcie-apple.c
drivers/pci/msi.c
drivers/phy/hisilicon/phy-hi3670-pcie.c
drivers/phy/marvell/phy-mvebu-cp110-utmi.c
drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
drivers/phy/qualcomm/phy-qcom-qmp.c
drivers/phy/qualcomm/phy-qcom-usb-hsic.c
drivers/phy/st/phy-stm32-usbphyc.c
drivers/phy/ti/phy-am654-serdes.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/phy/ti/phy-omap-usb2.c
drivers/phy/ti/phy-tusb1210.c
drivers/pinctrl/bcm/pinctrl-bcm2835.c
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/platform/mellanox/mlxbf-pmc.c
drivers/platform/x86/Makefile
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/apple-gmux.c
drivers/platform/x86/intel/Kconfig
drivers/platform/x86/intel/hid.c
drivers/platform/x86/intel/pmc/pltdrv.c
drivers/platform/x86/lg-laptop.c
drivers/platform/x86/system76_acpi.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/reset/tegra/reset-bpmp.c
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qedi/qedi_iscsi.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/scsi_debug.c
drivers/scsi/vmw_pvscsi.c
drivers/soc/imx/imx8m-blk-ctrl.c
drivers/soc/imx/soc-imx.c
drivers/soc/tegra/fuse/fuse-tegra.c
drivers/soc/tegra/fuse/fuse.h
drivers/spi/spi-armada-3700.c
drivers/tee/amdtee/core.c
drivers/tee/optee/core.c
drivers/tee/optee/smc_abi.c
drivers/tee/tee_shm.c
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/n_hdlc.c
drivers/tty/serial/8250/8250_fintek.c
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/cdns3/cdnsp-ring.c
drivers/usb/cdns3/cdnsp-trace.h
drivers/usb/cdns3/host.c
drivers/usb/core/config.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/early/xhci-dbc.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/legacy/dbgp.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mtk-sch.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/mtu3/mtu3_gadget.c
drivers/usb/mtu3/mtu3_qmu.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/ucsi/ucsi.c
drivers/vdpa/vdpa.c
drivers/vdpa/vdpa_user/vduse_dev.c
drivers/vhost/vdpa.c
drivers/video/fbdev/core/fbmem.c
drivers/virt/nitro_enclaves/ne_misc_dev.c
drivers/virtio/virtio_ring.c
drivers/xen/events/events_base.c
fs/afs/file.c
fs/afs/super.c
fs/aio.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delalloc-space.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/free-space-tree.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/root-tree.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/mds_client.c
fs/cifs/connect.c
fs/cifs/fs_context.c
fs/cifs/inode.c
fs/cifs/sess.c
fs/file.c
fs/io-wq.c
fs/io_uring.c
fs/ksmbd/ndr.c
fs/ksmbd/smb2ops.c
fs/ksmbd/smb2pdu.c
fs/namespace.c
fs/netfs/read_helper.c
fs/nfsd/nfs3proc.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsproc.c
fs/signalfd.c
fs/smbfs_common/cifs_arc4.c
fs/tracefs/inode.c
fs/xfs/xfs_super.c
fs/zonefs/super.c
include/linux/bpf.h
include/linux/btf.h
include/linux/cacheinfo.h
include/linux/compiler.h
include/linux/delay.h
include/linux/device/driver.h
include/linux/efi.h
include/linux/fb.h
include/linux/filter.h
include/linux/gfp.h
include/linux/hid.h
include/linux/instrumentation.h
include/linux/interrupt.h
include/linux/ipv6.h
include/linux/irqdomain.h
include/linux/memblock.h
include/linux/mhi.h
include/linux/mmzone.h
include/linux/netdevice.h
include/linux/pagemap.h
include/linux/percpu-refcount.h
include/linux/phy.h
include/linux/pm_runtime.h
include/linux/regulator/driver.h
include/linux/skbuff.h
include/linux/tee_drv.h
include/linux/virtio_net.h
include/linux/wait.h
include/net/bond_alb.h
include/net/busy_poll.h
include/net/netfilter/nf_conntrack.h
include/net/pkt_sched.h
include/net/sch_generic.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/net/seg6.h
include/net/sock.h
include/trace/events/vmscan.h
include/uapi/asm-generic/poll.h
include/uapi/linux/byteorder/big_endian.h
include/uapi/linux/byteorder/little_endian.h
include/uapi/linux/mptcp.h
include/uapi/linux/nfc.h
include/uapi/linux/resource.h
include/xen/events.h
kernel/audit.c
kernel/bpf/btf.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-internal.h
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/crash_core.c
kernel/irq/generic-chip.c
kernel/irq/manage.c
kernel/locking/rtmutex.c
kernel/sched/wait.c
kernel/signal.c
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_events_synth.c
kernel/ucount.c
lib/Kconfig.debug
mm/Kconfig
mm/backing-dev.c
mm/damon/core.c
mm/damon/dbgfs.c
mm/damon/vaddr-test.h
mm/damon/vaddr.c
mm/filemap.c
mm/hugetlb.c
mm/kfence/core.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/slub.c
mm/swap_slots.c
mm/vmscan.c
net/ax25/af_ax25.c
net/batman-adv/multicast.c
net/batman-adv/multicast.h
net/batman-adv/soft-interface.c
net/bridge/br_ioctl.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_sysfs_br.c
net/bridge/br_vlan_options.c
net/core/dev.c
net/core/devlink.c
net/core/flow_dissector.c
net/core/lwtunnel.c
net/core/neighbour.c
net/core/skbuff.c
net/core/skmsg.c
net/core/sock_map.c
net/dsa/tag_ocelot.c
net/ethtool/netlink.c
net/ipv4/af_inet.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/udp.c
net/ipv6/icmp.c
net/ipv6/ip6_vti.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/seg6.c
net/ipv6/seg6_iptunnel.c
net/ipv6/seg6_local.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/driver-ops.h
net/mac80211/ieee80211_i.h
net/mac80211/mesh.h
net/mac80211/mesh_pathtbl.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tx.c
net/mac80211/util.c
net/mctp/neigh.c
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/sockopt.c
net/ncsi/ncsi-netlink.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_set_pipapo_avx2.c
net/netrom/af_netrom.c
net/nfc/netlink.c
net/openvswitch/flow.c
net/packet/af_packet.c
net/phonet/pep.c
net/rds/connection.c
net/sched/act_ct.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/sch_cake.c
net/sched/sch_ets.c
net/sched/sch_fq_pie.c
net/sched/sch_frag.c
net/sched/sch_qfq.c
net/sctp/diag.c
net/sctp/endpointola.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_cdc.c
net/smc/smc_cdc.h
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_ib.c
net/smc/smc_ib.h
net/smc/smc_llc.c
net/smc/smc_wr.c
net/smc/smc_wr.h
net/tipc/crypto.c
net/tipc/socket.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/reg.c
net/xdp/xsk_buff_pool.c
samples/ftrace/Makefile
samples/ftrace/ftrace-direct-modify.c
samples/ftrace/ftrace-direct-multi-modify.c [new file with mode: 0644]
samples/ftrace/ftrace-direct-too.c
samples/ftrace/ftrace-direct.c
scripts/recordmcount.pl
security/selinux/hooks.c
security/tomoyo/util.c
sound/core/control_compat.c
sound/core/jack.c
sound/core/oss/pcm_oss.c
sound/core/rawmidi.c
sound/drivers/opl3/opl3_midi.c
sound/hda/intel-sdw-acpi.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/pci-acp6x.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/rt5682s.c
sound/soc/codecs/tas2770.c
sound/soc/codecs/wcd934x.c
sound/soc/codecs/wsa881x.c
sound/soc/meson/aiu-encoder-i2s.c
sound/soc/meson/aiu-fifo-i2s.c
sound/soc/meson/aiu-fifo.c
sound/soc/qcom/qdsp6/q6routing.c
sound/soc/rockchip/rockchip_i2s_tdm.c
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/pci-tgl.c
sound/soc/tegra/tegra210_adx.c
sound/soc/tegra/tegra210_amx.c
sound/soc/tegra/tegra210_mixer.c
sound/soc/tegra/tegra210_mvc.c
sound/soc/tegra/tegra210_sfc.c
sound/soc/tegra/tegra_asoc_machine.c
sound/soc/tegra/tegra_asoc_machine.h
sound/usb/mixer_quirks.c
tools/bpf/resolve_btfids/main.c
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-libpython-version.c [deleted file]
tools/include/linux/debug_locks.h [deleted file]
tools/include/linux/hardirq.h [deleted file]
tools/include/linux/irqflags.h [deleted file]
tools/include/linux/lockdep.h [deleted file]
tools/include/linux/proc_fs.h [deleted file]
tools/include/linux/spinlock.h
tools/include/linux/stacktrace.h [deleted file]
tools/perf/Makefile.config
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
tools/perf/arch/s390/entry/syscalls/syscall.tbl
tools/perf/bench/sched-messaging.c
tools/perf/builtin-inject.c
tools/perf/builtin-script.c
tools/perf/scripts/python/intel-pt-events.py
tools/perf/tests/expr.c
tools/perf/tests/parse-metric.c
tools/perf/ui/tui/setup.c
tools/perf/util/bpf_skel/bperf.h [deleted file]
tools/perf/util/bpf_skel/bperf_follower.bpf.c
tools/perf/util/bpf_skel/bperf_leader.bpf.c
tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
tools/perf/util/event.h
tools/perf/util/expr.c
tools/perf/util/header.c
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt.c
tools/perf/util/perf_regs.c
tools/perf/util/pmu.c
tools/perf/util/python.c
tools/perf/util/smt.c
tools/power/acpi/Makefile.config
tools/power/acpi/Makefile.rules
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
tools/testing/selftests/bpf/progs/test_module_attach.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
tools/testing/selftests/bpf/verifier/atomic_fetch.c
tools/testing/selftests/bpf/verifier/search_pruning.c
tools/testing/selftests/bpf/verifier/spill_fill.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
tools/testing/selftests/cgroup/cgroup_util.c
tools/testing/selftests/cgroup/test_core.c
tools/testing/selftests/damon/.gitignore [new file with mode: 0644]
tools/testing/selftests/damon/Makefile
tools/testing/selftests/damon/_debugfs_common.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_attrs.sh
tools/testing/selftests/damon/debugfs_empty_targets.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_huge_count_read_write.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_schemes.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_target_ids.sh [new file with mode: 0644]
tools/testing/selftests/damon/huge_count_read_write.c [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
tools/testing/selftests/kvm/x86_64/userspace_io_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
tools/testing/selftests/net/amt.sh [changed mode: 0644->0755]
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/forwarding/forwarding.config.sample
tools/testing/selftests/net/icmp_redirect.sh
tools/testing/selftests/net/mptcp/config
tools/testing/selftests/net/tls.c
tools/testing/selftests/net/toeplitz.c
tools/testing/selftests/net/udpgro_fwd.sh
tools/testing/selftests/net/udpgso.c
tools/testing/selftests/net/udpgso_bench_tx.c
tools/testing/selftests/netfilter/conntrack_vrf.sh
tools/testing/selftests/netfilter/nft_concat_range.sh
tools/testing/selftests/netfilter/nft_zones_many.sh
tools/testing/selftests/tc-testing/config
tools/testing/selftests/tc-testing/tdc.py
tools/testing/selftests/tc-testing/tdc.sh
tools/testing/selftests/vm/userfaultfd.c

index 6277bb2..b344067 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -126,6 +126,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
+Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
index bd9a490..9f73253 100644 (file)
@@ -25,6 +25,6 @@ Sub graphs of DRBD's state transitions
     :alt:   disk-states-8.dot
     :align: center
 
-.. kernel-figure:: node-states-8.dot
-    :alt:   node-states-8.dot
+.. kernel-figure:: peer-states-8.dot
+    :alt:   peer-states-8.dot
     :align: center
diff --git a/Documentation/admin-guide/blockdev/drbd/node-states-8.dot b/Documentation/admin-guide/blockdev/drbd/node-states-8.dot
deleted file mode 100644 (file)
index bfa54e1..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-digraph node_states {
-       Secondary -> Primary           [ label = "ioctl_set_state()" ]
-       Primary   -> Secondary         [ label = "ioctl_set_state()" ]
-}
-
-digraph peer_states {
-       Secondary -> Primary           [ label = "recv state packet" ]
-       Primary   -> Secondary         [ label = "recv state packet" ]
-       Primary   -> Unknown           [ label = "connection lost" ]
-       Secondary  -> Unknown          [ label = "connection lost" ]
-       Unknown   -> Primary           [ label = "connected" ]
-       Unknown   -> Secondary         [ label = "connected" ]
-}
diff --git a/Documentation/admin-guide/blockdev/drbd/peer-states-8.dot b/Documentation/admin-guide/blockdev/drbd/peer-states-8.dot
new file mode 100644 (file)
index 0000000..6dc3954
--- /dev/null
@@ -0,0 +1,8 @@
+digraph peer_states {
+       Secondary -> Primary           [ label = "recv state packet" ]
+       Primary   -> Secondary         [ label = "recv state packet" ]
+       Primary   -> Unknown           [ label = "connection lost" ]
+       Secondary  -> Unknown          [ label = "connection lost" ]
+       Unknown   -> Primary           [ label = "connected" ]
+       Unknown   -> Secondary         [ label = "connected" ]
+}
index 9725c54..2fba824 100644 (file)
                        architectures force reset to be always executed
        i8042.unlock    [HW] Unlock (ignore) the keylock
        i8042.kbdreset  [HW] Reset device connected to KBD port
+       i8042.probe_defer
+                       [HW] Allow deferred probing upon i8042 probe errors
 
        i810=           [HW,DRM]
 
                        Default is 1 (enabled)
 
        kvm-intel.emulate_invalid_guest_state=
-                       [KVM,Intel] Enable emulation of invalid guest states
-                       Default is 0 (disabled)
+                       [KVM,Intel] Disable emulation of invalid guest state.
+                       Ignored if kvm-intel.enable_unrestricted_guest=1, as
+                       guest state is never invalid for unrestricted guests.
+                       This param doesn't apply to nested guests (L2), as KVM
+                       never emulates invalid L2 guest state.
+                       Default is 1 (enabled)
 
        kvm-intel.flexpriority=
                        [KVM,Intel] Disable FlexPriority feature (TPR shadow).
index 17f7cee..76e5eb5 100644 (file)
@@ -249,11 +249,16 @@ except ImportError:
 
 html_static_path = ['sphinx-static']
 
-html_context = {
-    'css_files': [
-        '_static/theme_overrides.css',
-    ],
-}
+html_css_files = [
+    'theme_overrides.css',
+]
+
+if major <= 1 and minor < 8:
+    html_context = {
+        'css_files': [
+            '_static/theme_overrides.css',
+        ],
+    }
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
index 22fc848..82b9531 100644 (file)
@@ -20,9 +20,9 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - apple,t8103-i2c
-      - apple,i2c
+    items:
+      - const: apple,t8103-i2c
+      - const: apple,i2c
 
   reg:
     maxItems: 1
@@ -51,7 +51,7 @@ unevaluatedProperties: false
 examples:
   - |
     i2c@35010000 {
-      compatible = "apple,t8103-i2c";
+      compatible = "apple,t8103-i2c", "apple,i2c";
       reg = <0x35010000 0x4000>;
       interrupt-parent = <&aic>;
       interrupts = <0 627 4>;
index c65921e..81c8729 100644 (file)
@@ -136,7 +136,7 @@ examples:
         samsung,syscon-phandle = <&pmu_system_controller>;
 
         /* NTC thermistor is a hwmon device */
-        ncp15wb473 {
+        thermistor {
             compatible = "murata,ncp15wb473";
             pullup-uv = <1800000>;
             pullup-ohm = <47000>;
index 060a309..dbe7ecc 100644 (file)
@@ -142,7 +142,7 @@ examples:
         down {
             label = "GPIO Key DOWN";
             linux,code = <108>;
-            interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+            interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
         };
     };
 
index 877183c..1ef849d 100644 (file)
@@ -79,6 +79,8 @@ properties:
 
             properties:
               data-lanes:
+                description:
+                  Note that 'fsl,imx7-mipi-csi2' only supports up to 2 data lanes.
                 items:
                   minItems: 1
                   maxItems: 4
@@ -91,18 +93,6 @@ properties:
             required:
               - data-lanes
 
-            allOf:
-              - if:
-                  properties:
-                    compatible:
-                      contains:
-                        const: fsl,imx7-mipi-csi2
-                then:
-                  properties:
-                    data-lanes:
-                      items:
-                        maxItems: 2
-
       port@1:
         $ref: /schemas/graph.yaml#/properties/port
         description:
index 2766fe4..ee42328 100644 (file)
@@ -91,6 +91,14 @@ properties:
       compensate for the board being designed with the lanes
       swapped.
 
+  enet-phy-lane-no-swap:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      If set, indicates that PHY will disable swap of the
+      TX/RX lanes. This property allows the PHY to work correctly after
+      e.g. wrong bootstrap configuration caused by issues in PCB
+      layout design.
+
   eee-broken-100tx:
     $ref: /schemas/types.yaml#/definitions/flag
     description:
index 04d5654..7990651 100644 (file)
@@ -29,7 +29,7 @@ properties:
           - PHY_TYPE_PCIE
           - PHY_TYPE_SATA
           - PHY_TYPE_SGMII
-          - PHY_TYPE_USB
+          - PHY_TYPE_USB3
       - description: The PHY instance
         minimum: 0
         maximum: 1 # for DP, SATA or USB
index 06eca66..8367a1f 100644 (file)
@@ -105,7 +105,7 @@ examples:
           reg = <0x65>;
           interrupt-parent = <&gpio1>;
           interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
-          ti,watchdog-timer = <0>;
+          ti,watchdog-timeout-ms = <0>;
           ti,sc-ocp-limit-microamp = <2000000>;
           ti,sc-ovp-limit-microvolt = <17800000>;
           monitored-battery = <&bat>;
index 80a63d4..c98929a 100644 (file)
@@ -51,6 +51,19 @@ patternProperties:
     description:
       Properties for single BUCK regulator.
 
+    properties:
+      op_mode:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1, 2, 3]
+        default: 1
+        description: |
+          Describes the different operating modes of the regulator with power
+          mode change in SOC. The different possible values are:
+            0 - always off mode
+            1 - on in normal mode
+            2 - low power mode
+            3 - suspend mode
+
     required:
       - regulator-name
 
@@ -63,6 +76,18 @@ patternProperties:
       Properties for single BUCK regulator.
 
     properties:
+      op_mode:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1, 2, 3]
+        default: 1
+        description: |
+          Describes the different operating modes of the regulator with power
+          mode change in SOC. The different possible values are:
+            0 - always off mode
+            1 - on in normal mode
+            2 - low power mode
+            3 - suspend mode
+
       s5m8767,pmic-ext-control-gpios:
         maxItems: 1
         description: |
index 0e6249d..5e172e9 100644 (file)
@@ -19,6 +19,9 @@ properties:
   clocks:
     maxItems: 1
 
+  interrupts:
+    maxItems: 1
+
   "#sound-dai-cells":
     const: 0
 
index 7f987e7..52a78a2 100644 (file)
@@ -33,6 +33,7 @@ properties:
               - rockchip,rk3328-spi
               - rockchip,rk3368-spi
               - rockchip,rk3399-spi
+              - rockchip,rk3568-spi
               - rockchip,rv1126-spi
           - const: rockchip,rk3066-spi
 
index 136c4e3..786c618 100644 (file)
@@ -11,9 +11,11 @@ systems.  Some systems use variants that don't meet branding requirements,
 and so are not advertised as being I2C but come under different names,
 e.g. TWI (Two Wire Interface), IIC.
 
-The official I2C specification is the `"I2C-bus specification and user
-manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_
-published by NXP Semiconductors.
+The latest official I2C specification is the `"I2C-bus specification and user
+manual" (UM10204) <https://www.nxp.com/webapp/Download?colCode=UM10204>`_
+published by NXP Semiconductors. However, you need to log-in to the site to
+access the PDF. An older version of the specification (revision 6) is archived
+`here <https://web.archive.org/web/20210813122132/https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_.
 
 SMBus (System Management Bus) is based on the I2C protocol, and is mostly
 a subset of I2C protocols and signaling.  Many I2C devices will work on an
index ddada4a..4fd7b70 100644 (file)
@@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels::
   spin_lock(&p->lock);
   p->count += this_cpu_read(var2);
 
-On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable()
-which makes the above code fully equivalent. On a PREEMPT_RT kernel
 migrate_disable() ensures that the task is pinned on the current CPU which
 in turn guarantees that the per-CPU access to var1 and var2 are staying on
-the same CPU.
+the same CPU while the task remains preemptible.
 
 The migrate_disable() substitution is not valid for the following
 scenario::
@@ -456,9 +454,8 @@ scenario::
     p = this_cpu_ptr(&var1);
     p->val = func2();
 
-While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because
-here migrate_disable() does not protect against reentrancy from a
-preempting task. A correct substitution for this case is::
+This breaks because migrate_disable() does not protect against reentrancy from
+a preempting task. A correct substitution for this case is::
 
   func()
   {
index 31cfd7d..c0a789b 100644 (file)
@@ -196,11 +196,12 @@ ad_actor_sys_prio
 ad_actor_system
 
        In an AD system, this specifies the mac-address for the actor in
-       protocol packet exchanges (LACPDUs). The value cannot be NULL or
-       multicast. It is preferred to have the local-admin bit set for this
-       mac but driver does not enforce it. If the value is not given then
-       system defaults to using the masters' mac address as actors' system
-       address.
+       protocol packet exchanges (LACPDUs). The value cannot be a multicast
+       address. If the all-zeroes MAC is specified, bonding will internally
+       use the MAC of the bond itself. It is preferred to have the
+       local-admin bit set for this mac but driver does not enforce it. If
+       the value is not given then system defaults to using the masters'
+       mac address as actors' system address.
 
        This parameter has effect only in 802.3ad mode and is available through
        SysFs interface.
index d638b5a..1996477 100644 (file)
@@ -183,6 +183,7 @@ PHY and allows physical transmission and reception of Ethernet frames.
   IRQ config, enable, reset
 
 DPNI (Datapath Network Interface)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Contains TX/RX queues, network interface configuration, and RX buffer pool
 configuration mechanisms.  The TX/RX queues are in memory and are identified
 by queue number.
index f1d5233..0a233b1 100644 (file)
@@ -440,6 +440,22 @@ NOTE: For 82599-based network connections, if you are enabling jumbo frames in
 a virtual function (VF), jumbo frames must first be enabled in the physical
 function (PF). The VF MTU setting cannot be larger than the PF MTU.
 
+NBASE-T Support
+---------------
+The ixgbe driver supports NBASE-T on some devices. However, the advertisement
+of NBASE-T speeds is suppressed by default, to accommodate broken network
+switches which cannot cope with advertised NBASE-T speeds. Use the ethtool
+command to enable advertising NBASE-T speeds on devices which support it::
+
+  ethtool -s eth? advertise 0x1800000001028
+
+On Linux systems with INTERFACES(5), this can be specified as a pre-up command
+in /etc/network/interfaces so that the interface is always brought up with
+NBASE-T support, e.g.::
+
+  iface eth? inet dhcp
+       pre-up ethtool -s eth? advertise 0x1800000001028 || true
+
 Generic Receive Offload, aka GRO
 --------------------------------
 The driver supports the in-kernel software implementation of GRO. GRO has
index c044311..2572eec 100644 (file)
@@ -25,7 +25,8 @@ ip_default_ttl - INTEGER
 ip_no_pmtu_disc - INTEGER
        Disable Path MTU Discovery. If enabled in mode 1 and a
        fragmentation-required ICMP is received, the PMTU to this
-       destination will be set to min_pmtu (see below). You will need
+       destination will be set to the smallest of the old MTU to
+       this destination and min_pmtu (see below). You will need
        to raise min_pmtu to the smallest interface MTU on your system
        manually if you want to avoid locally generated fragments.
 
@@ -49,7 +50,8 @@ ip_no_pmtu_disc - INTEGER
        Default: FALSE
 
 min_pmtu - INTEGER
-       default 552 - minimum discovered Path MTU
+       default 552 - minimum Path MTU. Unless this is changed manually,
+       each cached pmtu will never be lower than this setting.
 
 ip_forward_use_pmtu - BOOLEAN
        By default we don't trust protocol path MTUs while forwarding
index 80b1335..f580920 100644 (file)
@@ -582,8 +582,8 @@ Time stamps for outgoing packets are to be generated as follows:
   and hardware timestamping is not possible (SKBTX_IN_PROGRESS not set).
 - As soon as the driver has sent the packet and/or obtained a
   hardware time stamp for it, it passes the time stamp back by
-  calling skb_hwtstamp_tx() with the original skb, the raw
-  hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+  calling skb_tstamp_tx() with the original skb, the raw
+  hardware time stamp. skb_tstamp_tx() clones the original skb and
   adds the timestamps, therefore the original skb has to be freed now.
   If obtaining the hardware time stamp somehow fails, then the driver
   should not fall back to software time stamping. The rationale is that
index b398b85..cf908d7 100644 (file)
@@ -35,6 +35,7 @@ GNU make               3.81             make --version
 binutils               2.23             ld -v
 flex                   2.5.35           flex --version
 bison                  2.0              bison --version
+pahole                 1.16             pahole --version
 util-linux             2.10o            fdformat --version
 kmod                   13               depmod -V
 e2fsprogs              1.41.4           e2fsck -V
@@ -108,6 +109,16 @@ Bison
 Since Linux 4.16, the build system generates parsers
 during build.  This requires bison 2.0 or later.
 
+pahole:
+-------
+
+Since Linux 5.2, if CONFIG_DEBUG_INFO_BTF is selected, the build system
+generates BTF (BPF Type Format) from DWARF in vmlinux, a bit later from kernel
+modules as well.  This requires pahole v1.16 or later.
+
+It is found in the 'dwarves' or 'pahole' distro packages or from
+https://fedorapeople.org/~acme/dwarves/.
+
 Perl
 ----
 
index da085d6..6b3aaed 100644 (file)
@@ -14,7 +14,8 @@ works, see Documentation/process/development-process.rst. Also, read
 Documentation/process/submit-checklist.rst
 for a list of items to check before submitting code.  If you are submitting
 a driver, also read Documentation/process/submitting-drivers.rst; for device
-tree binding patches, read Documentation/process/submitting-patches.rst.
+tree binding patches, read
+Documentation/devicetree/bindings/submitting-patches.rst.
 
 This documentation assumes that you're using ``git`` to prepare your patches.
 If you're unfamiliar with ``git``, you would be well-advised to learn how to
index 0ea967d..d253359 100644 (file)
@@ -326,6 +326,8 @@ usi-headset
     Headset support on USI machines
 dual-codecs
     Lenovo laptops with dual codecs
+alc285-hp-amp-init
+    HP laptops which require speaker amplifier initialization (ALC285)
 
 ALC680
 ======
index 43007f2..dd36acc 100644 (file)
@@ -3066,7 +3066,7 @@ F:        Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
 F:     drivers/phy/qualcomm/phy-ath79-usb.c
 
 ATHEROS ATH GENERIC UTILITIES
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 F:     drivers/net/wireless/ath/*
@@ -3081,7 +3081,7 @@ W:        https://wireless.wiki.kernel.org/en/users/Drivers/ath5k
 F:     drivers/net/wireless/ath/ath5k/
 
 ATHEROS ATH6KL WIRELESS DRIVER
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
@@ -3769,7 +3769,8 @@ S:        Supported
 F:     drivers/net/wireless/broadcom/brcm80211/
 
 BROADCOM BRCMSTB GPIO DRIVER
-M:     Gregory Fong <gregory.0xf0@gmail.com>
+M:     Doug Berger <opendmb@gmail.com>
+M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     bcm-kernel-feedback-list@broadcom.com
 S:     Supported
 F:     Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
@@ -9329,7 +9330,6 @@ S:        Maintained
 F:     drivers/iio/pressure/dps310.c
 
 INFINIBAND SUBSYSTEM
-M:     Doug Ledford <dledford@redhat.com>
 M:     Jason Gunthorpe <jgg@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
@@ -10280,9 +10280,9 @@ F:      lib/Kconfig.kcsan
 F:     scripts/Makefile.kcsan
 
 KDUMP
-M:     Dave Young <dyoung@redhat.com>
 M:     Baoquan He <bhe@redhat.com>
 R:     Vivek Goyal <vgoyal@redhat.com>
+R:     Dave Young <dyoung@redhat.com>
 L:     kexec@lists.infradead.org
 S:     Maintained
 W:     http://lse.sourceforge.net/kdump/
@@ -12180,8 +12180,8 @@ F:      drivers/net/ethernet/mellanox/mlx5/core/fpga/*
 F:     include/linux/mlx5/mlx5_ifc_fpga.h
 
 MELLANOX ETHERNET SWITCH DRIVERS
-M:     Jiri Pirko <jiri@nvidia.com>
 M:     Ido Schimmel <idosch@nvidia.com>
+M:     Petr Machata <petrm@nvidia.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -13249,7 +13249,7 @@ F:      include/uapi/linux/if_*
 F:     include/uapi/linux/netdevice.h
 
 NETWORKING DRIVERS (WIRELESS)
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
 Q:     http://patchwork.kernel.org/project/linux-wireless/list/
@@ -14846,7 +14846,7 @@ PCIE DRIVER FOR MEDIATEK
 M:     Ryder Lee <ryder.lee@mediatek.com>
 M:     Jianjun Wang <jianjun.wang@mediatek.com>
 L:     linux-pci@vger.kernel.org
-L:     linux-mediatek@lists.infradead.org
+L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     Documentation/devicetree/bindings/pci/mediatek*
 F:     drivers/pci/controller/*mediatek*
@@ -15705,7 +15705,7 @@ T:      git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/qt1010*
 
 QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     ath10k@lists.infradead.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
@@ -15713,7 +15713,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 F:     drivers/net/wireless/ath/ath10k/
 
 QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     ath11k@lists.infradead.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -15771,6 +15771,15 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/net/qcom,ethqos.txt
 F:     drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
 
+QUALCOMM FASTRPC DRIVER
+M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M:     Amol Maheshwari <amahesh@qti.qualcomm.com>
+L:     linux-arm-msm@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
+F:     drivers/misc/fastrpc.c
+F:     include/uapi/misc/fastrpc.h
+
 QUALCOMM GENERIC INTERFACE I2C DRIVER
 M:     Akash Asthana <akashast@codeaurora.org>
 M:     Mukesh Savaliya <msavaliy@codeaurora.org>
@@ -15877,7 +15886,7 @@ F:      Documentation/devicetree/bindings/media/*venus*
 F:     drivers/media/platform/qcom/venus/
 
 QUALCOMM WCN36XX WIRELESS DRIVER
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     wcn36xx@lists.infradead.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
@@ -16629,7 +16638,6 @@ W:      http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/iommu/s390-iommu.c
 
 S390 IUCV NETWORK LAYER
-M:     Julian Wiedmann <jwi@linux.ibm.com>
 M:     Alexandra Winter <wintera@linux.ibm.com>
 M:     Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
@@ -16641,7 +16649,6 @@ F:      include/net/iucv/
 F:     net/iucv/
 
 S390 NETWORK DRIVERS
-M:     Julian Wiedmann <jwi@linux.ibm.com>
 M:     Alexandra Winter <wintera@linux.ibm.com>
 M:     Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
@@ -17417,7 +17424,7 @@ F:      drivers/video/fbdev/sm712*
 SILVACO I3C DUAL-ROLE MASTER
 M:     Miquel Raynal <miquel.raynal@bootlin.com>
 M:     Conor Culhane <conor.culhane@silvaco.com>
-L:     linux-i3c@lists.infradead.org
+L:     linux-i3c@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
 F:     drivers/i3c/master/svc-i3c-master.c
@@ -21053,7 +21060,7 @@ S:      Maintained
 F:     arch/x86/kernel/cpu/zhaoxin.c
 
 ZONEFS FILESYSTEM
-M:     Damien Le Moal <damien.lemoal@wdc.com>
+M:     Damien Le Moal <damien.lemoal@opensource.wdc.com>
 M:     Naohiro Aota <naohiro.aota@wdc.com>
 R:     Johannes Thumshirn <jth@kernel.org>
 L:     linux-fsdevel@vger.kernel.org
index 8e35d78..16d7f83 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc8
 NAME = Gobble Gobble
 
 # *DOCUMENTATION*
@@ -789,7 +789,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG)      := -fstack-protector-strong
 KBUILD_CFLAGS += $(stackp-flags-y)
 
 KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
-KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
+KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH:"%"=%)
 
 ifdef CONFIG_CC_IS_CLANG
 KBUILD_CPPFLAGS += -Qunused-arguments
@@ -1374,17 +1374,17 @@ endif
 
 ifneq ($(dtstree),)
 
-%.dtb: dt_binding_check include/config/kernel.release scripts_dtc
-       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml
+%.dtb: include/config/kernel.release scripts_dtc
+       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
 
-%.dtbo: dt_binding_check include/config/kernel.release scripts_dtc
-       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml
+%.dtbo: include/config/kernel.release scripts_dtc
+       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
 
 PHONY += dtbs dtbs_install dtbs_check
 dtbs: include/config/kernel.release scripts_dtc
        $(Q)$(MAKE) $(build)=$(dtstree)
 
-ifneq ($(filter dtbs_check %.dtb %.dtbo, $(MAKECMDGOALS)),)
+ifneq ($(filter dtbs_check, $(MAKECMDGOALS)),)
 export CHECK_DTBS=y
 dtbs: dt_binding_check
 endif
index b62a0db..ec6fba5 100644 (file)
 
                ethphy: ethernet-phy@1 {
                        reg = <1>;
+                       qca,clk-out-frequency = <125000000>;
                };
        };
 };
index 7648e8a..cf6571c 100644 (file)
                                label = "cpu";
                                ethernet = <&fec>;
                                phy-mode = "rgmii-id";
+                               rx-internal-delay-ps = <2000>;
+                               tx-internal-delay-ps = <2000>;
 
                                fixed-link {
                                        speed = <100>;
index eb025a9..7328d4e 100644 (file)
@@ -82,6 +82,6 @@
 #define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS                         0x01F4 0x0480 0x0000 0x9 0x0
 #define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK                        0x01F8 0x0484 0x0000 0x9 0x0
 #define MX6ULL_PAD_CSI_DATA06__ESAI_TX5_RX0                       0x01FC 0x0488 0x0000 0x9 0x0
-#define MX6ULL_PAD_CSI_DATA07__ESAI_T                           0x0200 0x048C 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA07__ESAI_TX0                           0x0200 0x048C 0x0000 0x9 0x0
 
 #endif /* __DTS_IMX6ULL_PINFUNC_H */
index ff0ffb2..1ea32ff 100644 (file)
@@ -91,6 +91,8 @@
                                /* Internal port connected to eth2 */
                                ethernet = <&enet2>;
                                phy-mode = "rgmii";
+                               rx-internal-delay-ps = <0>;
+                               tx-internal-delay-ps = <0>;
                                reg = <4>;
 
                                fixed-link {
index 2b64564..2a74552 100644 (file)
@@ -12,7 +12,7 @@
        flash0: n25q00@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q00aa";
+               compatible = "micron,mt25qu02g", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <100000000>;
 
index 90e676e..1b02d46 100644 (file)
        flash: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q256a";
+               compatible = "micron,n25q256a", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <100000000>;
 
index 6f138b2..51bb436 100644 (file)
        flash0: n25q00@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q00";
+               compatible = "micron,mt25qu02g", "jedec,spi-nor";
                reg = <0>;      /* chip select */
                spi-max-frequency = <100000000>;
 
index c155ff0..cae9ddd 100644 (file)
        flash: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q00";
+               compatible = "micron,mt25qu02g", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <100000000>;
 
index 8d5d399..ca18b95 100644 (file)
@@ -80,7 +80,7 @@
        flash: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q256a";
+               compatible = "micron,n25q256a", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <100000000>;
                m25p,fast-read;
index 99a7175..3f7aa7b 100644 (file)
        flash0: n25q512a@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q512a";
+               compatible = "micron,n25q512a", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <100000000>;
 
index a060718..25874e1 100644 (file)
        n25q128@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q128";
+               compatible = "micron,n25q128", "jedec,spi-nor";
                reg = <0>;              /* chip select */
                spi-max-frequency = <100000000>;
                m25p,fast-read;
        n25q00@1 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q00";
+               compatible = "micron,mt25qu02g", "jedec,spi-nor";
                reg = <1>;              /* chip select */
                spi-max-frequency = <100000000>;
                m25p,fast-read;
index a6f3b17..27218ea 100644 (file)
@@ -17,7 +17,6 @@
 
 #ifdef CONFIG_EFI
 void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
 
 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
index deff286..5cd0578 100644 (file)
@@ -596,11 +596,9 @@ call_fpe:
        tstne   r0, #0x04000000                 @ bit 26 set on both ARM and Thumb-2
        reteq   lr
        and     r8, r0, #0x00000f00             @ mask out CP number
- THUMB(        lsr     r8, r8, #8              )
        mov     r7, #1
-       add     r6, r10, #TI_USED_CP
- ARM(  strb    r7, [r6, r8, lsr #8]    )       @ set appropriate used_cp[]
- THUMB(        strb    r7, [r6, r8]            )       @ set appropriate used_cp[]
+       add     r6, r10, r8, lsr #8             @ add used_cp[] array offset first
+       strb    r7, [r6, #TI_USED_CP]           @ set appropriate used_cp[]
 #ifdef CONFIG_IWMMXT
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
@@ -609,7 +607,7 @@ call_fpe:
        bcs     iwmmxt_task_enable
 #endif
  ARM(  add     pc, pc, r8, lsr #6      )
- THUMB(        lsl     r8, r8, #2              )
+ THUMB(        lsr     r8, r8, #6              )
  THUMB(        add     pc, r8                  )
        nop
 
index fadfee9..950bef8 100644 (file)
@@ -114,6 +114,7 @@ ENTRY(secondary_startup)
        add     r12, r12, r10
        ret     r12
 1:     bl      __after_proc_init
+       ldr     r7, __secondary_data            @ reload r7
        ldr     sp, [r7, #12]                   @ set up the stack pointer
        ldr     r0, [r7, #16]                   @ set up task pointer
        mov     fp, #0
index d608568..5ec58d0 100644 (file)
@@ -189,7 +189,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
        rockchip_boot_fn = __pa_symbol(secondary_startup);
 
        /* copy the trampoline to sram, that runs during startup of the core */
-       memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
+       memcpy_toio(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
        flush_cache_all();
        outer_clean_range(0, trampoline_sz);
 
index 1aa8b70..54e3910 100644 (file)
@@ -161,7 +161,6 @@ config ARCH_MEDIATEK
 
 config ARCH_MESON
        bool "Amlogic Platforms"
-       select COMMON_CLK
        help
          This enables support for the arm64 based Amlogic SoCs
          such as the s905, S905X/D, S912, A113X/D or S905X/D2
index d13980e..7ec5ac8 100644 (file)
@@ -69,7 +69,7 @@
        pinctrl-0 = <&emac_rgmii_pins>;
        phy-supply = <&reg_gmac_3v3>;
        phy-handle = <&ext_rgmii_phy>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        status = "okay";
 };
 
index 52ebe37..561eec2 100644 (file)
                                        type = "critical";
                                };
                        };
-               };
 
-               cpu_cooling_maps: cooling-maps {
-                       map0 {
-                               trip = <&cpu_passive>;
-                               cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
-                       };
+                       cpu_cooling_maps: cooling-maps {
+                               map0 {
+                                       trip = <&cpu_passive>;
+                                       cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+                               };
 
-                       map1 {
-                               trip = <&cpu_hot>;
-                               cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+                               map1 {
+                                       trip = <&cpu_hot>;
+                                       cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+                               };
                        };
                };
        };
index 33a80f9..02c3630 100644 (file)
@@ -60,7 +60,7 @@
 
 &port02 {
        bus-range = <3 3>;
-       ethernet0: pci@0,0 {
+       ethernet0: ethernet@0,0 {
                reg = <0x30000 0x0 0x0 0x0 0x0>;
                /* To be filled by the loader */
                local-mac-address = [00 10 18 00 00 00];
index fc8b2bb..8b61e7f 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright The Asahi Linux Contributors
  */
 
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/apple-aic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/pinctrl/apple.h>
                        apple,npins = <212>;
 
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupt-parent = <&aic>;
                        interrupts = <AIC_IRQ 190 IRQ_TYPE_LEVEL_HIGH>,
                                     <AIC_IRQ 191 IRQ_TYPE_LEVEL_HIGH>,
                        apple,npins = <42>;
 
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupt-parent = <&aic>;
                        interrupts = <AIC_IRQ 268 IRQ_TYPE_LEVEL_HIGH>,
                                     <AIC_IRQ 269 IRQ_TYPE_LEVEL_HIGH>,
                        apple,npins = <23>;
 
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupt-parent = <&aic>;
                        interrupts = <AIC_IRQ 330 IRQ_TYPE_LEVEL_HIGH>,
                                     <AIC_IRQ 331 IRQ_TYPE_LEVEL_HIGH>,
                        apple,npins = <16>;
 
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupt-parent = <&aic>;
                        interrupts = <AIC_IRQ 391 IRQ_TYPE_LEVEL_HIGH>,
                                     <AIC_IRQ 392 IRQ_TYPE_LEVEL_HIGH>,
                        port00: pci@0,0 {
                                device_type = "pci";
                                reg = <0x0 0x0 0x0 0x0 0x0>;
-                               reset-gpios = <&pinctrl_ap 152 0>;
+                               reset-gpios = <&pinctrl_ap 152 GPIO_ACTIVE_LOW>;
                                max-link-speed = <2>;
 
                                #address-cells = <3>;
                        port01: pci@1,0 {
                                device_type = "pci";
                                reg = <0x800 0x0 0x0 0x0 0x0>;
-                               reset-gpios = <&pinctrl_ap 153 0>;
+                               reset-gpios = <&pinctrl_ap 153 GPIO_ACTIVE_LOW>;
                                max-link-speed = <2>;
 
                                #address-cells = <3>;
                        port02: pci@2,0 {
                                device_type = "pci";
                                reg = <0x1000 0x0 0x0 0x0 0x0>;
-                               reset-gpios = <&pinctrl_ap 33 0>;
+                               reset-gpios = <&pinctrl_ap 33 GPIO_ACTIVE_LOW>;
                                max-link-speed = <1>;
 
                                #address-cells = <3>;
index 3063851..d3f03dc 100644 (file)
@@ -38,7 +38,6 @@
                powerdn {
                        label = "External Power Down";
                        gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
-                       interrupts = <&gpio1 17 IRQ_TYPE_EDGE_FALLING>;
                        linux,code = <KEY_POWER>;
                };
 
@@ -46,7 +45,6 @@
                admin {
                        label = "ADMIN button";
                        gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
-                       interrupts = <&gpio3 8 IRQ_TYPE_EDGE_RISING>;
                        linux,code = <KEY_WPS_BUTTON>;
                };
        };
index b21be03..042c486 100644 (file)
                                reg = <2>;
                                ethernet = <&dpmac17>;
                                phy-mode = "rgmii-id";
+                               rx-internal-delay-ps = <2000>;
+                               tx-internal-delay-ps = <2000>;
 
                                fixed-link {
                                        speed = <1000>;
                                reg = <2>;
                                ethernet = <&dpmac18>;
                                phy-mode = "rgmii-id";
+                               rx-internal-delay-ps = <2000>;
+                               tx-internal-delay-ps = <2000>;
 
                                fixed-link {
                                        speed = <1000>;
index dc8661e..2433e6f 100644 (file)
                        clock-names = "i2c";
                        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                                            QORIQ_CLK_PLL_DIV(16)>;
-                       scl-gpio = <&gpio2 15 GPIO_ACTIVE_HIGH>;
+                       scl-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
                        status = "disabled";
                };
 
                        clock-names = "i2c";
                        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                                            QORIQ_CLK_PLL_DIV(16)>;
-                       scl-gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>;
+                       scl-gpios = <&gpio2 16 GPIO_ACTIVE_HIGH>;
                        status = "disabled";
                };
 
index 972766b..71bf497 100644 (file)
                                                  <&clk IMX8MQ_VIDEO_PLL1>,
                                                  <&clk IMX8MQ_VIDEO_PLL1_OUT>;
                                assigned-clock-rates = <0>, <0>, <0>, <594000000>;
-                               interconnects = <&noc IMX8MQ_ICM_LCDIF &noc IMX8MQ_ICS_DRAM>;
-                               interconnect-names = "dram";
                                status = "disabled";
 
                                port@0 {
index 665b2e6..ea68209 100644 (file)
@@ -97,7 +97,7 @@
                regulator-max-microvolt = <3300000>;
                regulator-always-on;
                regulator-boot-on;
-               vim-supply = <&vcc_io>;
+               vin-supply = <&vcc_io>;
        };
 
        vdd_core: vdd-core {
index d5c7648..f1fcc6b 100644 (file)
 &sdhci {
        bus-width = <8>;
        mmc-hs400-1_8v;
-       mmc-hs400-enhanced-strobe;
        non-removable;
        status = "okay";
 };
index 63c7681..b6ac00f 100644 (file)
                clock-output-names = "xin32k", "rk808-clkout2";
                pinctrl-names = "default";
                pinctrl-0 = <&pmic_int_l>;
+               rockchip,system-power-controller;
                vcc1-supply = <&vcc5v0_sys>;
                vcc2-supply = <&vcc5v0_sys>;
                vcc3-supply = <&vcc5v0_sys>;
index 7c93f84..e890166 100644 (file)
@@ -55,7 +55,7 @@
                regulator-boot-on;
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
-               vim-supply = <&vcc3v3_sys>;
+               vin-supply = <&vcc3v3_sys>;
        };
 
        vcc3v3_sys: vcc3v3-sys {
index 98136c8..6a434be 100644 (file)
        status = "okay";
 
        bt656-supply = <&vcc_3v0>;
-       audio-supply = <&vcc_3v0>;
+       audio-supply = <&vcc1v8_codec>;
        sdmmc-supply = <&vcc_sdio>;
        gpio1830-supply = <&vcc_3v0>;
 };
index d3e1825..ad55079 100644 (file)
@@ -14,7 +14,6 @@
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
 #else
 #define efi_init()
 #endif
index 63634b4..59c648d 100644 (file)
@@ -149,6 +149,7 @@ int load_other_segments(struct kimage *image,
                                           initrd_len, cmdline, 0);
        if (!dtb) {
                pr_err("Preparing for new dtb failed\n");
+               ret = -EINVAL;
                goto out_err;
        }
 
index e5fbf86..2020af8 100644 (file)
@@ -209,7 +209,7 @@ asmlinkage void do_trap_illinsn(struct pt_regs *regs)
 
 asmlinkage void do_trap_fpe(struct pt_regs *regs)
 {
-#ifdef CONFIG_CPU_HAS_FP
+#ifdef CONFIG_CPU_HAS_FPU
        return fpu_fpe(regs);
 #else
        do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
@@ -219,7 +219,7 @@ asmlinkage void do_trap_fpe(struct pt_regs *regs)
 
 asmlinkage void do_trap_priv(struct pt_regs *regs)
 {
-#ifdef CONFIG_CPU_HAS_FP
+#ifdef CONFIG_CPU_HAS_FPU
        if (user_mode(regs) && fpu_libc_helper(regs))
                return;
 #endif
index 05d14c2..f7af11e 100644 (file)
@@ -6,5 +6,7 @@
 #define PCI_IOSIZE     SZ_64K
 #define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
 
+#define pci_remap_iospace pci_remap_iospace
+
 #include <asm/mach-generic/spaces.h>
 #endif
index 421231f..9ffc819 100644 (file)
 #include <linux/list.h>
 #include <linux/of.h>
 
-#ifdef CONFIG_PCI_DRIVERS_GENERIC
-#define pci_remap_iospace pci_remap_iospace
-#endif
-
 #ifdef CONFIG_PCI_DRIVERS_LEGACY
 
 /*
index 6f3a7b0..a37fe20 100644 (file)
@@ -98,7 +98,7 @@ do {                                                          \
 #define emit(...) __emit(__VA_ARGS__)
 
 /* Workaround for R10000 ll/sc errata */
-#ifdef CONFIG_WAR_R10000
+#ifdef CONFIG_WAR_R10000_LLSC
 #define LLSC_beqz      beqzl
 #else
 #define LLSC_beqz      beqz
index 18eb8a4..d2d68ba 100644 (file)
@@ -47,6 +47,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
        pci_read_bridge_bases(bus);
 }
 
+#ifdef pci_remap_iospace
 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 {
        unsigned long vaddr;
@@ -60,3 +61,4 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
        set_io_port_base(vaddr);
        return 0;
 }
+#endif
index b2188da..011dc32 100644 (file)
@@ -85,11 +85,6 @@ config MMU
 config STACK_GROWSUP
        def_bool y
 
-config ARCH_DEFCONFIG
-       string
-       default "arch/parisc/configs/generic-32bit_defconfig" if !64BIT
-       default "arch/parisc/configs/generic-64bit_defconfig" if 64BIT
-
 config GENERIC_LOCKBREAK
        bool
        default y
index 70cf8f0..9cd4dd6 100644 (file)
@@ -14,7 +14,7 @@ static inline void
 _futex_spin_lock(u32 __user *uaddr)
 {
        extern u32 lws_lock_start[];
-       long index = ((long)uaddr & 0x3f8) >> 1;
+       long index = ((long)uaddr & 0x7f8) >> 1;
        arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
        preempt_disable();
        arch_spin_lock(s);
@@ -24,7 +24,7 @@ static inline void
 _futex_spin_unlock(u32 __user *uaddr)
 {
        extern u32 lws_lock_start[];
-       long index = ((long)uaddr & 0x3f8) >> 1;
+       long index = ((long)uaddr & 0x7f8) >> 1;
        arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
        arch_spin_unlock(s);
        preempt_enable();
index d2497b3..65c88ca 100644 (file)
@@ -472,7 +472,7 @@ lws_start:
        extrd,u %r1,PSW_W_BIT,1,%r1
        /* sp must be aligned on 4, so deposit the W bit setting into
         * the bottom of sp temporarily */
-       or,ev   %r1,%r30,%r30
+       or,od   %r1,%r30,%r30
 
        /* Clip LWS number to a 32-bit value for 32-bit processes */
        depdi   0, 31, 32, %r20
index b11fb26..892b7fc 100644 (file)
@@ -730,6 +730,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
                        }
                        mmap_read_unlock(current->mm);
                }
+               /* CPU could not fetch instruction, so clear stale IIR value. */
+               regs->iir = 0xbaadf00d;
                fallthrough;
        case 27: 
                /* Data memory protection ID trap */
index 6baa676..5d77d3f 100644 (file)
@@ -422,11 +422,17 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
                              const char *name)
 {
        long reladdr;
+       func_desc_t desc;
+       int i;
 
        if (is_mprofile_ftrace_call(name))
                return create_ftrace_stub(entry, addr, me);
 
-       memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
+       for (i = 0; i < sizeof(ppc64_stub_insns) / sizeof(u32); i++) {
+               if (patch_instruction(&entry->jump[i],
+                                     ppc_inst(ppc64_stub_insns[i])))
+                       return 0;
+       }
 
        /* Stub uses address relative to r2. */
        reladdr = (unsigned long)entry - my_r2(sechdrs, me);
@@ -437,10 +443,24 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
        }
        pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
 
-       entry->jump[0] |= PPC_HA(reladdr);
-       entry->jump[1] |= PPC_LO(reladdr);
-       entry->funcdata = func_desc(addr);
-       entry->magic = STUB_MAGIC;
+       if (patch_instruction(&entry->jump[0],
+                             ppc_inst(entry->jump[0] | PPC_HA(reladdr))))
+               return 0;
+
+       if (patch_instruction(&entry->jump[1],
+                         ppc_inst(entry->jump[1] | PPC_LO(reladdr))))
+               return 0;
+
+       // func_desc_t is 8 bytes if ABIv2, else 16 bytes
+       desc = func_desc(addr);
+       for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) {
+               if (patch_instruction(((u32 *)&entry->funcdata) + i,
+                                     ppc_inst(((u32 *)(&desc))[i])))
+                       return 0;
+       }
+
+       if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC)))
+               return 0;
 
        return 1;
 }
@@ -495,8 +515,11 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
                        me->name, *instruction, instruction);
                return 0;
        }
+
        /* ld r2,R2_STACK_OFFSET(r1) */
-       *instruction = PPC_INST_LD_TOC;
+       if (patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC)))
+               return 0;
+
        return 1;
 }
 
@@ -636,9 +659,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                        }
 
                        /* Only replace bits 2 through 26 */
-                       *(uint32_t *)location
-                               = (*(uint32_t *)location & ~0x03fffffc)
+                       value = (*(uint32_t *)location & ~0x03fffffc)
                                | (value & 0x03fffffc);
+
+                       if (patch_instruction((u32 *)location, ppc_inst(value)))
+                               return -EFAULT;
+
                        break;
 
                case R_PPC64_REL64:
index bf25119..32bfb21 100644 (file)
@@ -183,7 +183,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
 {
        pte_t pte = __pte(st->current_flags);
 
-       if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
+       if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx)
                return;
 
        if (!pte_write(pte) || !pte_exec(pte))
index 83f4a63..d7081e9 100644 (file)
@@ -220,7 +220,7 @@ static int smp_85xx_start_cpu(int cpu)
        local_irq_save(flags);
        hard_irq_disable();
 
-       if (qoriq_pm_ops)
+       if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
                qoriq_pm_ops->cpu_up_prepare(cpu);
 
        /* if cpu is not spinning, reset it */
@@ -292,7 +292,7 @@ static int smp_85xx_kick_cpu(int nr)
                booting_thread_hwid = cpu_thread_in_core(nr);
                primary = cpu_first_thread_sibling(nr);
 
-               if (qoriq_pm_ops)
+               if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
                        qoriq_pm_ops->cpu_up_prepare(nr);
 
                /*
index ba304d4..ced0d4e 100644 (file)
@@ -76,6 +76,7 @@
                spi-max-frequency = <20000000>;
                voltage-ranges = <3300 3300>;
                disable-wp;
+               gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
        };
 };
 
index 4f66919..6bfa1f2 100644 (file)
@@ -2,6 +2,7 @@
 /* Copyright (c) 2020 SiFive, Inc */
 
 #include "fu740-c000.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 /* Clock frequency (in Hz) of the PCB crystal for rtcclk */
        temperature-sensor@4c {
                compatible = "ti,tmp451";
                reg = <0x4c>;
+               vcc-supply = <&vdd_bpro>;
                interrupt-parent = <&gpio>;
                interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
        };
 
+       eeprom@54 {
+               compatible = "microchip,24c02", "atmel,24c02";
+               reg = <0x54>;
+               vcc-supply = <&vdd_bpro>;
+               label = "board-id";
+               pagesize = <16>;
+               read-only;
+               size = <256>;
+       };
+
        pmic@58 {
                compatible = "dlg,da9063";
                reg = <0x58>;
                interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
 
-               regulators {
-                       vdd_bcore1: bcore1 {
-                               regulator-min-microvolt = <900000>;
-                               regulator-max-microvolt = <900000>;
-                               regulator-min-microamp = <5000000>;
-                               regulator-max-microamp = <5000000>;
-                               regulator-always-on;
-                       };
+               onkey {
+                       compatible = "dlg,da9063-onkey";
+               };
 
-                       vdd_bcore2: bcore2 {
-                               regulator-min-microvolt = <900000>;
-                               regulator-max-microvolt = <900000>;
-                               regulator-min-microamp = <5000000>;
-                               regulator-max-microamp = <5000000>;
+               rtc {
+                       compatible = "dlg,da9063-rtc";
+               };
+
+               wdt {
+                       compatible = "dlg,da9063-watchdog";
+               };
+
+               regulators {
+                       vdd_bcore: bcores-merged {
+                               regulator-min-microvolt = <1050000>;
+                               regulator-max-microvolt = <1050000>;
+                               regulator-min-microamp = <4800000>;
+                               regulator-max-microamp = <4800000>;
                                regulator-always-on;
                        };
 
                        vdd_bpro: bpro {
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <2500000>;
-                               regulator-max-microamp = <2500000>;
+                               regulator-min-microamp = <2400000>;
+                               regulator-max-microamp = <2400000>;
                                regulator-always-on;
                        };
 
                        vdd_bperi: bperi {
-                               regulator-min-microvolt = <1050000>;
-                               regulator-max-microvolt = <1050000>;
+                               regulator-min-microvolt = <1060000>;
+                               regulator-max-microvolt = <1060000>;
                                regulator-min-microamp = <1500000>;
                                regulator-max-microamp = <1500000>;
                                regulator-always-on;
                        };
 
-                       vdd_bmem: bmem {
-                               regulator-min-microvolt = <1200000>;
-                               regulator-max-microvolt = <1200000>;
-                               regulator-min-microamp = <3000000>;
-                               regulator-max-microamp = <3000000>;
-                               regulator-always-on;
-                       };
-
-                       vdd_bio: bio {
+                       vdd_bmem_bio: bmem-bio-merged {
                                regulator-min-microvolt = <1200000>;
                                regulator-max-microvolt = <1200000>;
                                regulator-min-microamp = <3000000>;
                        vdd_ldo1: ldo1 {
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <100000>;
-                               regulator-max-microamp = <100000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo2: ldo2 {
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo3: ldo3 {
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo4: ldo4 {
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-min-microvolt = <2500000>;
+                               regulator-max-microvolt = <2500000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo5: ldo5 {
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <100000>;
-                               regulator-max-microamp = <100000>;
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo6: ldo6 {
-                               regulator-min-microvolt = <3300000>;
-                               regulator-max-microvolt = <3300000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <1800000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo7: ldo7 {
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo8: ldo8 {
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
 
                        vdd_ld09: ldo9 {
                                regulator-min-microvolt = <1050000>;
                                regulator-max-microvolt = <1050000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-always-on;
                        };
 
                        vdd_ldo10: ldo10 {
                                regulator-min-microvolt = <1000000>;
                                regulator-max-microvolt = <1000000>;
-                               regulator-min-microamp = <300000>;
-                               regulator-max-microamp = <300000>;
+                               regulator-always-on;
                        };
 
                        vdd_ldo11: ldo11 {
                                regulator-min-microvolt = <2500000>;
                                regulator-max-microvolt = <2500000>;
-                               regulator-min-microamp = <300000>;
-                               regulator-max-microamp = <300000>;
                                regulator-always-on;
                        };
                };
                spi-max-frequency = <20000000>;
                voltage-ranges = <3300 3300>;
                disable-wp;
+               gpios = <&gpio 15 GPIO_ACTIVE_LOW>;
        };
 };
 
 
 &gpio {
        status = "okay";
+       gpio-line-names = "J29.1", "PMICNTB", "PMICSHDN", "J8.1", "J8.3",
+               "PCIe_PWREN", "THERM", "UBRDG_RSTN", "PCIe_PERSTN",
+               "ULPI_RSTN", "J8.2", "UHUB_RSTN", "GEMGXL_RST", "J8.4",
+               "EN_VDD_SD", "SD_CD";
 };
index 49b398f..cc4f678 100644 (file)
@@ -13,7 +13,6 @@
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
 #else
 #define efi_init()
 #endif
index b626bc6..e45cc27 100644 (file)
@@ -117,6 +117,7 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_NET_SWITCHDEV=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
@@ -511,6 +512,7 @@ CONFIG_NLMON=m
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_ESWITCH=y
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_NET_VENDOR_MICROSEMI is not set
index 0056cab..1c750bf 100644 (file)
@@ -109,6 +109,7 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_NET_SWITCHDEV=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
@@ -502,6 +503,7 @@ CONFIG_NLMON=m
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_ESWITCH=y
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_NET_VENDOR_MICROSEMI is not set
index 5510c7d..21d62d8 100644 (file)
@@ -290,7 +290,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                return;
 
        regs = ftrace_get_regs(fregs);
-       preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto out;
@@ -318,7 +317,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
        }
        __this_cpu_write(current_kprobe, NULL);
 out:
-       preempt_enable_notrace();
        ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
index 0df83ec..cb70996 100644 (file)
@@ -138,7 +138,7 @@ void noinstr do_io_irq(struct pt_regs *regs)
        struct pt_regs *old_regs = set_irq_regs(regs);
        int from_idle;
 
-       irq_enter();
+       irq_enter_rcu();
 
        if (user_mode(regs)) {
                update_timer_sys();
@@ -158,7 +158,8 @@ void noinstr do_io_irq(struct pt_regs *regs)
                        do_irq_async(regs, IO_INTERRUPT);
        } while (MACHINE_IS_LPAR && irq_pending(regs));
 
-       irq_exit();
+       irq_exit_rcu();
+
        set_irq_regs(old_regs);
        irqentry_exit(regs, state);
 
@@ -172,7 +173,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
        struct pt_regs *old_regs = set_irq_regs(regs);
        int from_idle;
 
-       irq_enter();
+       irq_enter_rcu();
 
        if (user_mode(regs)) {
                update_timer_sys();
@@ -190,7 +191,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
 
        do_irq_async(regs, EXT_INTERRUPT);
 
-       irq_exit();
+       irq_exit_rcu();
        set_irq_regs(old_regs);
        irqentry_exit(regs, state);
 
index 9975ad2..8f43575 100644 (file)
@@ -7,6 +7,8 @@
  * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
  */
 
+#define pr_fmt(fmt)    "kexec: " fmt
+
 #include <linux/elf.h>
 #include <linux/errno.h>
 #include <linux/kexec.h>
@@ -290,8 +292,16 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
                                     const Elf_Shdr *relsec,
                                     const Elf_Shdr *symtab)
 {
+       const char *strtab, *name, *shstrtab;
+       const Elf_Shdr *sechdrs;
        Elf_Rela *relas;
        int i, r_type;
+       int ret;
+
+       /* String & section header string table */
+       sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+       strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
+       shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
 
        relas = (void *)pi->ehdr + relsec->sh_offset;
 
@@ -304,15 +314,27 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
                sym = (void *)pi->ehdr + symtab->sh_offset;
                sym += ELF64_R_SYM(relas[i].r_info);
 
-               if (sym->st_shndx == SHN_UNDEF)
+               if (sym->st_name)
+                       name = strtab + sym->st_name;
+               else
+                       name = shstrtab + sechdrs[sym->st_shndx].sh_name;
+
+               if (sym->st_shndx == SHN_UNDEF) {
+                       pr_err("Undefined symbol: %s\n", name);
                        return -ENOEXEC;
+               }
 
-               if (sym->st_shndx == SHN_COMMON)
+               if (sym->st_shndx == SHN_COMMON) {
+                       pr_err("symbol '%s' in common section\n", name);
                        return -ENOEXEC;
+               }
 
                if (sym->st_shndx >= pi->ehdr->e_shnum &&
-                   sym->st_shndx != SHN_ABS)
+                   sym->st_shndx != SHN_ABS) {
+                       pr_err("Invalid section %d for symbol %s\n",
+                              sym->st_shndx, name);
                        return -ENOEXEC;
+               }
 
                loc = pi->purgatory_buf;
                loc += section->sh_offset;
@@ -326,7 +348,15 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
                addr = section->sh_addr + relas[i].r_offset;
 
                r_type = ELF64_R_TYPE(relas[i].r_info);
-               arch_kexec_do_relocs(r_type, loc, val, addr);
+
+               if (r_type == R_390_PLT32DBL)
+                       r_type = R_390_PC32DBL;
+
+               ret = arch_kexec_do_relocs(r_type, loc, val, addr);
+               if (ret) {
+                       pr_err("Unknown rela relocation: %d\n", r_type);
+                       return -ENOEXEC;
+               }
        }
        return 0;
 }
index 7399327..5c2ccb8 100644 (file)
@@ -1932,6 +1932,7 @@ config EFI
        depends on ACPI
        select UCS2_STRING
        select EFI_RUNTIME_WRAPPERS
+       select ARCH_USE_MEMREMAP_PROT
        help
          This enables the kernel to use EFI runtime services that are
          available (such as the EFI variable services).
index 4d0b126..63158fd 100644 (file)
@@ -197,8 +197,6 @@ static inline bool efi_runtime_supported(void)
 
 extern void parse_efi_setup(u64 phys_addr, u32 data_len);
 
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
-
 extern void efi_thunk_runtime_setup(void);
 efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
                                         unsigned long descriptor_size,
index cefe1d8..9e50da3 100644 (file)
@@ -47,6 +47,7 @@ KVM_X86_OP(set_dr7)
 KVM_X86_OP(cache_reg)
 KVM_X86_OP(get_rflags)
 KVM_X86_OP(set_rflags)
+KVM_X86_OP(get_if_flag)
 KVM_X86_OP(tlb_flush_all)
 KVM_X86_OP(tlb_flush_current)
 KVM_X86_OP_NULL(tlb_remote_flush)
index 860ed50..555f4de 100644 (file)
@@ -97,7 +97,7 @@
        KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_TLB_FLUSH_CURRENT      KVM_ARCH_REQ(26)
 #define KVM_REQ_TLB_FLUSH_GUEST \
-       KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
+       KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_APF_READY              KVM_ARCH_REQ(28)
 #define KVM_REQ_MSR_FILTER_CHANGED     KVM_ARCH_REQ(29)
 #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
@@ -1349,6 +1349,7 @@ struct kvm_x86_ops {
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+       bool (*get_if_flag)(struct kvm_vcpu *vcpu);
 
        void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
        void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
index 4cd49af..74f0a2d 100644 (file)
@@ -4,8 +4,8 @@
 
 #include <asm/cpufeature.h>
 
-#define PKRU_AD_BIT 0x1
-#define PKRU_WD_BIT 0x2
+#define PKRU_AD_BIT 0x1u
+#define PKRU_WD_BIT 0x2u
 #define PKRU_BITS_PER_PKEY 2
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
index 6a190c7..e04f5e6 100644 (file)
@@ -713,9 +713,6 @@ static void __init early_reserve_memory(void)
 
        early_reserve_initrd();
 
-       if (efi_enabled(EFI_BOOT))
-               efi_memblock_x86_reserve_range();
-
        memblock_x86_reserve_range_setup_data();
 
        reserve_ibft_region();
@@ -742,28 +739,6 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
        return 0;
 }
 
-static char * __init prepare_command_line(void)
-{
-#ifdef CONFIG_CMDLINE_BOOL
-#ifdef CONFIG_CMDLINE_OVERRIDE
-       strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
-       if (builtin_cmdline[0]) {
-               /* append boot loader cmdline to builtin */
-               strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
-               strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-               strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-       }
-#endif
-#endif
-
-       strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-
-       parse_early_param();
-
-       return command_line;
-}
-
 /*
  * Determine if we were loaded by an EFI loader.  If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -852,23 +827,6 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.oem.arch_setup();
 
-       /*
-        * x86_configure_nx() is called before parse_early_param() (called by
-        * prepare_command_line()) to detect whether hardware doesn't support
-        * NX (so that the early EHCI debug console setup can safely call
-        * set_fixmap()). It may then be called again from within noexec_setup()
-        * during parsing early parameters to honor the respective command line
-        * option.
-        */
-       x86_configure_nx();
-
-       /*
-        * This parses early params and it needs to run before
-        * early_reserve_memory() because latter relies on such settings
-        * supplied as early params.
-        */
-       *cmdline_p = prepare_command_line();
-
        /*
         * Do some memory reservations *before* memory is added to memblock, so
         * memblock allocations won't overwrite it.
@@ -902,6 +860,36 @@ void __init setup_arch(char **cmdline_p)
        bss_resource.start = __pa_symbol(__bss_start);
        bss_resource.end = __pa_symbol(__bss_stop)-1;
 
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+       strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+       if (builtin_cmdline[0]) {
+               /* append boot loader cmdline to builtin */
+               strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+               strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+               strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+       }
+#endif
+#endif
+
+       strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+       *cmdline_p = command_line;
+
+       /*
+        * x86_configure_nx() is called before parse_early_param() to detect
+        * whether hardware doesn't support NX (so that the early EHCI debug
+        * console setup can safely call set_fixmap()). It may then be called
+        * again from within noexec_setup() during parsing early parameters
+        * to honor the respective command line option.
+        */
+       x86_configure_nx();
+
+       parse_early_param();
+
+       if (efi_enabled(EFI_BOOT))
+               efi_memblock_x86_reserve_range();
+
 #ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Memory used by the kernel cannot be hot-removed because Linux
index ac2909f..617012f 100644 (file)
@@ -579,6 +579,17 @@ static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
        { NULL, },
 };
 
+static struct sched_domain_topology_level x86_hybrid_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+       { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+       { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
+#endif
+       { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+       { NULL, },
+};
+
 static struct sched_domain_topology_level x86_topology[] = {
 #ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
@@ -1469,8 +1480,11 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 
        calculate_max_logical_packages();
 
+       /* XXX for now assume numa-in-package and hybrid don't overlap */
        if (x86_has_numa_in_package)
                set_sched_topology(x86_numa_in_package_topology);
+       if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+               set_sched_topology(x86_hybrid_topology);
 
        nmi_selftest();
        impress_friends();
index 54a83a7..f33c804 100644 (file)
@@ -95,6 +95,9 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v)
        unsigned int *log[KVM_NR_PAGE_SIZES], *cur;
        int i, j, k, l, ret;
 
+       if (!kvm_memslots_have_rmaps(kvm))
+               return 0;
+
        ret = -ENOMEM;
        memset(log, 0, sizeof(log));
        for (i = 0; i < KVM_NR_PAGE_SIZES; i++) {
index 5e19e6e..8d8c1cc 100644 (file)
@@ -1922,11 +1922,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 
                all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
 
+               if (all_cpus)
+                       goto check_and_send_ipi;
+
                if (!sparse_banks_len)
                        goto ret_success;
 
-               if (!all_cpus &&
-                   kvm_read_guest(kvm,
+               if (kvm_read_guest(kvm,
                                   hc->ingpa + offsetof(struct hv_send_ipi_ex,
                                                        vp_set.bank_contents),
                                   sparse_banks,
@@ -1934,6 +1936,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
        }
 
+check_and_send_ipi:
        if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
                return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
index e2e1d01..fcdf3f8 100644 (file)
@@ -3987,7 +3987,21 @@ out_retry:
 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
                                struct kvm_page_fault *fault, int mmu_seq)
 {
-       if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
+       struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root_hpa);
+
+       /* Special roots, e.g. pae_root, are not backed by shadow pages. */
+       if (sp && is_obsolete_sp(vcpu->kvm, sp))
+               return true;
+
+       /*
+        * Roots without an associated shadow page are considered invalid if
+        * there is a pending request to free obsolete roots.  The request is
+        * only a hint that the current root _may_ be obsolete and needs to be
+        * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
+        * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
+        * to reload even if no vCPU is actively using the root.
+        */
+       if (!sp && kvm_test_request(KVM_REQ_MMU_RELOAD, vcpu))
                return true;
 
        return fault->slot &&
index b3ed302..caa96c2 100644 (file)
@@ -26,6 +26,7 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
  */
 void tdp_iter_restart(struct tdp_iter *iter)
 {
+       iter->yielded = false;
        iter->yielded_gfn = iter->next_last_level_gfn;
        iter->level = iter->root_level;
 
@@ -160,6 +161,11 @@ static bool try_step_up(struct tdp_iter *iter)
  */
 void tdp_iter_next(struct tdp_iter *iter)
 {
+       if (iter->yielded) {
+               tdp_iter_restart(iter);
+               return;
+       }
+
        if (try_step_down(iter))
                return;
 
index b1748b9..e19cabb 100644 (file)
@@ -45,6 +45,12 @@ struct tdp_iter {
         * iterator walks off the end of the paging structure.
         */
        bool valid;
+       /*
+        * True if KVM dropped mmu_lock and yielded in the middle of a walk, in
+        * which case tdp_iter_next() needs to restart the walk at the root
+        * level instead of advancing to the next entry.
+        */
+       bool yielded;
 };
 
 /*
index 1db8496..1beb4ca 100644 (file)
@@ -502,6 +502,8 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
                                           struct tdp_iter *iter,
                                           u64 new_spte)
 {
+       WARN_ON_ONCE(iter->yielded);
+
        lockdep_assert_held_read(&kvm->mmu_lock);
 
        /*
@@ -575,6 +577,8 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                      u64 new_spte, bool record_acc_track,
                                      bool record_dirty_log)
 {
+       WARN_ON_ONCE(iter->yielded);
+
        lockdep_assert_held_write(&kvm->mmu_lock);
 
        /*
@@ -640,18 +644,19 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
  * If this function should yield and flush is set, it will perform a remote
  * TLB flush before yielding.
  *
- * If this function yields, it will also reset the tdp_iter's walk over the
- * paging structure and the calling function should skip to the next
- * iteration to allow the iterator to continue its traversal from the
- * paging structure root.
+ * If this function yields, iter->yielded is set and the caller must skip to
+ * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
+ * over the paging structures to allow the iterator to continue its traversal
+ * from the paging structure root.
  *
- * Return true if this function yielded and the iterator's traversal was reset.
- * Return false if a yield was not needed.
+ * Returns true if this function yielded.
  */
-static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
-                                            struct tdp_iter *iter, bool flush,
-                                            bool shared)
+static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
+                                                         struct tdp_iter *iter,
+                                                         bool flush, bool shared)
 {
+       WARN_ON(iter->yielded);
+
        /* Ensure forward progress has been made before yielding. */
        if (iter->next_last_level_gfn == iter->yielded_gfn)
                return false;
@@ -671,12 +676,10 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
 
                WARN_ON(iter->gfn > iter->next_last_level_gfn);
 
-               tdp_iter_restart(iter);
-
-               return true;
+               iter->yielded = true;
        }
 
-       return false;
+       return iter->yielded;
 }
 
 /*
index 7656a2c..be28831 100644 (file)
@@ -1565,7 +1565,7 @@ static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
        r = -EINTR;
        if (mutex_lock_killable(&dst_kvm->lock))
                goto release_src;
-       if (mutex_lock_killable(&src_kvm->lock))
+       if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
                goto unlock_dst;
        return 0;
 
index d0f68d1..5151efa 100644 (file)
@@ -1585,6 +1585,15 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
        to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
+static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
+{
+       struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+
+       return sev_es_guest(vcpu->kvm)
+               ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
+               : kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
+}
+
 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
        switch (reg) {
@@ -3568,14 +3577,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
        if (!gif_set(svm))
                return true;
 
-       if (sev_es_guest(vcpu->kvm)) {
-               /*
-                * SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask
-                * bit to determine the state of the IF flag.
-                */
-               if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK))
-                       return true;
-       } else if (is_guest_mode(vcpu)) {
+       if (is_guest_mode(vcpu)) {
                /* As long as interrupts are being delivered...  */
                if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
                    ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
@@ -3586,7 +3588,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
                if (nested_exit_on_intr(svm))
                        return false;
        } else {
-               if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
+               if (!svm_get_if_flag(vcpu))
                        return true;
        }
 
@@ -4621,6 +4623,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .cache_reg = svm_cache_reg,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
+       .get_if_flag = svm_get_if_flag,
 
        .tlb_flush_all = svm_flush_tlb,
        .tlb_flush_current = svm_flush_tlb,
index 9453743..0dbf94e 100644 (file)
@@ -1363,6 +1363,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
                vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
+static bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
+{
+       return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
+}
+
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@@ -2646,15 +2651,6 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
                if (!loaded_vmcs->msr_bitmap)
                        goto out_vmcs;
                memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
-
-               if (IS_ENABLED(CONFIG_HYPERV) &&
-                   static_branch_unlikely(&enable_evmcs) &&
-                   (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
-                       struct hv_enlightened_vmcs *evmcs =
-                               (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
-
-                       evmcs->hv_enlightenments_control.msr_bitmap = 1;
-               }
        }
 
        memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
@@ -3968,8 +3964,7 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
        if (pi_test_and_set_on(&vmx->pi_desc))
                return 0;
 
-       if (vcpu != kvm_get_running_vcpu() &&
-           !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+       if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
                kvm_vcpu_kick(vcpu);
 
        return 0;
@@ -5886,18 +5881,14 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
                vmx_flush_pml_buffer(vcpu);
 
        /*
-        * We should never reach this point with a pending nested VM-Enter, and
-        * more specifically emulation of L2 due to invalid guest state (see
-        * below) should never happen as that means we incorrectly allowed a
-        * nested VM-Enter with an invalid vmcs12.
+        * KVM should never reach this point with a pending nested VM-Enter.
+        * More specifically, short-circuiting VM-Entry to emulate L2 due to
+        * invalid guest state should never happen as that means KVM knowingly
+        * allowed a nested VM-Enter with an invalid vmcs12.  More below.
         */
        if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
                return -EIO;
 
-       /* If guest state is invalid, start emulating */
-       if (vmx->emulation_required)
-               return handle_invalid_guest_state(vcpu);
-
        if (is_guest_mode(vcpu)) {
                /*
                 * PML is never enabled when running L2, bail immediately if a
@@ -5919,10 +5910,30 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
                 */
                nested_mark_vmcs12_pages_dirty(vcpu);
 
+               /*
+                * Synthesize a triple fault if L2 state is invalid.  In normal
+                * operation, nested VM-Enter rejects any attempt to enter L2
+                * with invalid state.  However, those checks are skipped if
+                * state is being stuffed via RSM or KVM_SET_NESTED_STATE.  If
+                * L2 state is invalid, it means either L1 modified SMRAM state
+                * or userspace provided bad state.  Synthesize TRIPLE_FAULT as
+                * doing so is architecturally allowed in the RSM case, and is
+                * the least awful solution for the userspace case without
+                * risking false positives.
+                */
+               if (vmx->emulation_required) {
+                       nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
+                       return 1;
+               }
+
                if (nested_vmx_reflect_vmexit(vcpu))
                        return 1;
        }
 
+       /* If guest state is invalid, start emulating.  L2 is handled above. */
+       if (vmx->emulation_required)
+               return handle_invalid_guest_state(vcpu);
+
        if (exit_reason.failed_vmentry) {
                dump_vmcs(vcpu);
                vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -6617,9 +6628,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * consistency check VM-Exit due to invalid guest state and bail.
         */
        if (unlikely(vmx->emulation_required)) {
-
-               /* We don't emulate invalid state of a nested guest */
-               vmx->fail = is_guest_mode(vcpu);
+               vmx->fail = 0;
 
                vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
                vmx->exit_reason.failed_vmentry = 1;
@@ -6842,6 +6851,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
        if (err < 0)
                goto free_pml;
 
+       /*
+        * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
+        * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
+        * feature only for vmcs01, KVM currently isn't equipped to realize any
+        * performance benefits from enabling it for vmcs02.
+        */
+       if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) &&
+           (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
+               struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
+
+               evmcs->hv_enlightenments_control.msr_bitmap = 1;
+       }
+
        /* The MSR bitmap starts with all ones */
        bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
        bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
@@ -7575,6 +7597,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .cache_reg = vmx_cache_reg,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
+       .get_if_flag = vmx_get_if_flag,
 
        .tlb_flush_all = vmx_flush_tlb_all,
        .tlb_flush_current = vmx_flush_tlb_current,
index e0aa4dd..e50e97a 100644 (file)
@@ -890,7 +890,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
            !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
                return 1;
 
-       if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+       if (!(cr0 & X86_CR0_PG) &&
+           (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
                return 1;
 
        static_call(kvm_x86_set_cr0)(vcpu, cr0);
@@ -1330,7 +1331,7 @@ static const u32 msrs_to_save_all[] = {
        MSR_IA32_UMWAIT_CONTROL,
 
        MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
-       MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
+       MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
        MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
        MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
        MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
@@ -3412,7 +3413,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
                if (!msr_info->host_initiated)
                        return 1;
-               if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) && kvm_get_msr_feature(&msr_ent))
+               if (kvm_get_msr_feature(&msr_ent))
                        return 1;
                if (data & ~msr_ent.data)
                        return 1;
@@ -7121,7 +7122,13 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
                           unsigned short port, void *val, unsigned int count)
 {
        if (vcpu->arch.pio.count) {
-               /* Complete previous iteration.  */
+               /*
+                * Complete a previous iteration that required userspace I/O.
+                * Note, @count isn't guaranteed to match pio.count as userspace
+                * can modify ECX before rerunning the vCPU.  Ignore any such
+                * shenanigans as KVM doesn't support modifying the rep count,
+                * and the emulator ensures @count doesn't overflow the buffer.
+                */
        } else {
                int r = __emulator_pio_in(vcpu, size, port, count);
                if (!r)
@@ -7130,7 +7137,6 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
                /* Results already available, fall through.  */
        }
 
-       WARN_ON(count != vcpu->arch.pio.count);
        complete_emulator_pio_in(vcpu, val);
        return 1;
 }
@@ -8995,14 +9001,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *kvm_run = vcpu->run;
 
-       /*
-        * if_flag is obsolete and useless, so do not bother
-        * setting it for SEV-ES guests.  Userspace can just
-        * use kvm_run->ready_for_interrupt_injection.
-        */
-       kvm_run->if_flag = !vcpu->arch.guest_state_protected
-               && (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
-
+       kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu);
        kvm_run->cr8 = kvm_get_cr8(vcpu);
        kvm_run->apic_base = kvm_get_apic_base(vcpu);
 
index 726700f..bafe36e 100644 (file)
@@ -1252,19 +1252,54 @@ st:                     if (is_imm8(insn->off))
                case BPF_LDX | BPF_MEM | BPF_DW:
                case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
                        if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
-                               /* test src_reg, src_reg */
-                               maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
-                               EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
-                               /* jne start_of_ldx */
-                               EMIT2(X86_JNE, 0);
+                               /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM
+                                * add abs(insn->off) to the limit to make sure that negative
+                                * offset won't be an issue.
+                                * insn->off is s16, so it won't affect valid pointers.
+                                */
+                               u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
+                               u8 *end_of_jmp1, *end_of_jmp2;
+
+                               /* Conservatively check that src_reg + insn->off is a kernel address:
+                                * 1. src_reg + insn->off >= limit
+                                * 2. src_reg + insn->off doesn't become small positive.
+                                * Cannot do src_reg + insn->off >= limit in one branch,
+                                * since it needs two spare registers, but JIT has only one.
+                                */
+
+                               /* movabsq r11, limit */
+                               EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
+                               EMIT((u32)limit, 4);
+                               EMIT(limit >> 32, 4);
+                               /* cmp src_reg, r11 */
+                               maybe_emit_mod(&prog, src_reg, AUX_REG, true);
+                               EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
+                               /* if unsigned '<' goto end_of_jmp2 */
+                               EMIT2(X86_JB, 0);
+                               end_of_jmp1 = prog;
+
+                               /* mov r11, src_reg */
+                               emit_mov_reg(&prog, true, AUX_REG, src_reg);
+                               /* add r11, insn->off */
+                               maybe_emit_1mod(&prog, AUX_REG, true);
+                               EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
+                               /* jmp if not carry to start_of_ldx
+                                * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
+                                * that has to be rejected.
+                                */
+                               EMIT2(0x73 /* JNC */, 0);
+                               end_of_jmp2 = prog;
+
                                /* xor dst_reg, dst_reg */
                                emit_mov_imm32(&prog, false, dst_reg, 0);
                                /* jmp byte_after_ldx */
                                EMIT2(0xEB, 0);
 
-                               /* populate jmp_offset for JNE above */
-                               temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
+                               /* populate jmp_offset for JB above to jump to xor dst_reg */
+                               end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
+                               /* populate jmp_offset for JNC above to jump to start_of_ldx */
                                start_of_ldx = prog;
+                               end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
                        }
                        emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
                        if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
@@ -1305,7 +1340,7 @@ st:                       if (is_imm8(insn->off))
                                 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
                                 * of 4 bytes will be ignored and rbx will be zero inited.
                                 */
-                               ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
+                               ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
                        }
                        break;
 
index b15ebfe..b0b848d 100644 (file)
@@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
                return;
        }
 
-       new = early_memremap(data.phys_map, data.size);
+       new = early_memremap_prot(data.phys_map, data.size,
+                                 pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
        if (!new) {
                pr_err("Failed to map new boot services memmap\n");
                return;
index c736cf2..e2c5b29 100644 (file)
@@ -68,7 +68,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
        "(__parainstructions|__alt_instructions)(_end)?|"
        "(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
        "__(start|end)_pci_.*|"
-#if CONFIG_FW_LOADER_BUILTIN
+#if CONFIG_FW_LOADER
        "__(start|end)_builtin_fw|"
 #endif
        "__(start|stop)___ksymtab(_gpl)?|"
index a5b37cc..769b643 100644 (file)
@@ -2311,7 +2311,14 @@ static void ioc_timer_fn(struct timer_list *timer)
                        hwm = current_hweight_max(iocg);
                        new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
                                                         usage, &now);
-                       if (new_hwi < hwm) {
+                       /*
+                        * Donation calculation assumes hweight_after_donation
+                        * to be positive, a condition that a donor w/ hwa < 2
+                        * can't meet. Don't bother with donation if hwa is
+                        * below 2. It's not gonna make a meaningful difference
+                        * anyway.
+                        */
+                       if (new_hwi < hwm && hwa >= 2) {
                                iocg->hweight_donating = hwa;
                                iocg->hweight_after_donation = new_hwi;
                                list_add(&iocg->surplus_list, &surpluses);
index ad732a3..0da147e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/falloc.h>
 #include <linux/suspend.h>
 #include <linux/fs.h>
+#include <linux/module.h>
 #include "blk.h"
 
 static inline struct inode *bdev_file_inode(struct file *file)
@@ -340,8 +341,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
-                       bio->bi_status = BLK_STS_IOERR;
-                       bio_endio(bio);
+                       bio_put(bio);
                        return ret;
                }
        }
index 313c14a..6f01d35 100644 (file)
@@ -220,6 +220,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
                                pgrp = task_pgrp(current);
                        else
                                pgrp = find_vpid(who);
+                       read_lock(&tasklist_lock);
                        do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                                tmpio = get_task_ioprio(p);
                                if (tmpio < 0)
@@ -229,6 +230,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
                                else
                                        ret = ioprio_best(ret, tmpio);
                        } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+                       read_unlock(&tasklist_lock);
+
                        break;
                case IOPRIO_WHO_USER:
                        uid = make_kuid(current_user_ns(), who);
index be5d40a..a110338 100644 (file)
@@ -41,8 +41,7 @@ obj-$(CONFIG_DMADEVICES)      += dma/
 # SOC specific infrastructure drivers.
 obj-y                          += soc/
 
-obj-$(CONFIG_VIRTIO)           += virtio/
-obj-$(CONFIG_VIRTIO_PCI_LIB)   += virtio/
+obj-y                          += virtio/
 obj-$(CONFIG_VDPA)             += vdpa/
 obj-$(CONFIG_XEN)              += xen/
 
index cffbe57..c75fb60 100644 (file)
@@ -4422,23 +4422,20 @@ static int binder_thread_release(struct binder_proc *proc,
        __release(&t->lock);
 
        /*
-        * If this thread used poll, make sure we remove the waitqueue
-        * from any epoll data structures holding it with POLLFREE.
-        * waitqueue_active() is safe to use here because we're holding
-        * the inner lock.
+        * If this thread used poll, make sure we remove the waitqueue from any
+        * poll data structures holding it.
         */
-       if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
-           waitqueue_active(&thread->wait)) {
-               wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
-       }
+       if (thread->looper & BINDER_LOOPER_STATE_POLL)
+               wake_up_pollfree(&thread->wait);
 
        binder_inner_proc_unlock(thread->proc);
 
        /*
-        * This is needed to avoid races between wake_up_poll() above and
-        * and ep_remove_waitqueue() called for other reasons (eg the epoll file
-        * descriptor being closed); ep_remove_waitqueue() holds an RCU read
-        * lock, so we can be sure it's done after calling synchronize_rcu().
+        * This is needed to avoid races between wake_up_pollfree() above and
+        * someone else removing the last entry from the queue for other reasons
+        * (e.g. ep_remove_wait_queue() being called due to an epoll file
+        * descriptor being closed).  Such other users hold an RCU read lock, so
+        * we can be sure they're done after we call synchronize_rcu().
         */
        if (thread->looper & BINDER_LOOPER_STATE_POLL)
                synchronize_rcu();
index 340515f..47bc74a 100644 (file)
@@ -671,7 +671,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
        BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
 
        if (buffer->async_transaction) {
-               alloc->free_async_space += size + sizeof(struct binder_buffer);
+               alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
 
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_free_buf size %zd async free %zd\n",
index 50b56cd..e9c7c07 100644 (file)
@@ -94,6 +94,7 @@ struct ceva_ahci_priv {
 static unsigned int ceva_ahci_read_id(struct ata_device *dev,
                                        struct ata_taskfile *tf, u16 *id)
 {
+       __le16 *__id = (__le16 *)id;
        u32 err_mask;
 
        err_mask = ata_do_dev_read_id(dev, tf, id);
@@ -103,7 +104,7 @@ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
         * Since CEVA controller does not support device sleep feature, we
         * need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data.
         */
-       id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+       __id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
 
        return 0;
 }
index 59ad8c9..aba0c67 100644 (file)
@@ -3920,6 +3920,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "VRFDFC22048UCHC-TE*", NULL,          ATA_HORKAGE_NODMA },
        /* Odd clown on sil3726/4726 PMPs */
        { "Config  Disk",       NULL,           ATA_HORKAGE_DISABLE },
+       /* Similar story with ASMedia 1092 */
+       { "ASMT109x- Config",   NULL,           ATA_HORKAGE_DISABLE },
 
        /* Weird ATAPI devices */
        { "TORiSAN DVD-ROM DRD-N216", NULL,     ATA_HORKAGE_MAX_SEC_128 },
index 1b84d55..313e947 100644 (file)
@@ -2859,8 +2859,19 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
                goto invalid_fld;
        }
 
-       if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0)
-               tf->protocol = ATA_PROT_NCQ_NODATA;
+       if ((cdb[2 + cdb_offset] & 0x3) == 0) {
+               /*
+                * When T_LENGTH is zero (No data is transferred), dir should
+                * be DMA_NONE.
+                */
+               if (scmd->sc_data_direction != DMA_NONE) {
+                       fp = 2 + cdb_offset;
+                       goto invalid_fld;
+               }
+
+               if (ata_is_ncq(tf->protocol))
+                       tf->protocol = ATA_PROT_NCQ_NODATA;
+       }
 
        /* enable LBA */
        tf->flags |= ATA_TFLAG_LBA;
index 304accd..6d309e4 100644 (file)
@@ -37,7 +37,7 @@ struct charlcd_priv {
        bool must_clear;
 
        /* contains the LCD config state */
-       unsigned long int flags;
+       unsigned long flags;
 
        /* Current escape sequence and it's length or -1 if outside */
        struct {
@@ -578,6 +578,9 @@ static int charlcd_init(struct charlcd *lcd)
         * Since charlcd_init_display() needs to write data, we have to
         * enable mark the LCD initialized just before.
         */
+       if (WARN_ON(!lcd->ops->init_display))
+               return -EINVAL;
+
        ret = lcd->ops->init_display(lcd);
        if (ret)
                return ret;
index f4d0c55..04ea92c 100644 (file)
@@ -1902,7 +1902,7 @@ int dpm_prepare(pm_message_t state)
        device_block_probing();
 
        mutex_lock(&dpm_list_mtx);
-       while (!list_empty(&dpm_list)) {
+       while (!list_empty(&dpm_list) && !error) {
                struct device *dev = to_device(dpm_list.next);
 
                get_device(dev);
index 8e3983e..286cf1a 100644 (file)
@@ -1512,9 +1512,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
        unsigned long flags;
        struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
        struct blkfront_info *info = rinfo->dev_info;
+       unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
 
-       if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+       if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+               xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
                return IRQ_HANDLED;
+       }
 
        spin_lock_irqsave(&rinfo->ring_lock, flags);
  again:
@@ -1530,6 +1533,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                unsigned long id;
                unsigned int op;
 
+               eoiflag = 0;
+
                RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
                id = bret.id;
 
@@ -1646,6 +1651,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 
+       xen_irq_lateeoi(irq, eoiflag);
+
        return IRQ_HANDLED;
 
  err:
@@ -1653,6 +1660,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 
+       /* No EOI in order to avoid further interrupts. */
+
        pr_alert("%s disabled for further use\n", info->gd->disk_name);
        return IRQ_HANDLED;
 }
@@ -1692,8 +1701,8 @@ static int setup_blkring(struct xenbus_device *dev,
        if (err)
                goto fail;
 
-       err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
-                                       "blkif", rinfo);
+       err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
+                                               0, "blkif", rinfo);
        if (err <= 0) {
                xenbus_dev_fatal(dev, err,
                                 "bind_evtchn_to_irqhandler failed");
index fb99e37..547e6e7 100644 (file)
@@ -881,7 +881,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
 }
 EXPORT_SYMBOL_GPL(mhi_pm_suspend);
 
-int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
 {
        struct mhi_chan *itr, *tmp;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -898,8 +898,12 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
                return -EIO;
 
-       if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
-               return -EINVAL;
+       if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
+               dev_warn(dev, "Resuming from non M3 state (%s)\n",
+                        TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
+               if (!force)
+                       return -EINVAL;
+       }
 
        /* Notify clients about exiting LPM */
        list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
@@ -940,8 +944,19 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
 
        return 0;
 }
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+       return __mhi_pm_resume(mhi_cntrl, false);
+}
 EXPORT_SYMBOL_GPL(mhi_pm_resume);
 
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
+{
+       return __mhi_pm_resume(mhi_cntrl, true);
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
+
 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
 {
        int ret;
index 59a4896..4c577a7 100644 (file)
@@ -20,7 +20,7 @@
 
 #define MHI_PCI_DEFAULT_BAR_NUM 0
 
-#define MHI_POST_RESET_DELAY_MS 500
+#define MHI_POST_RESET_DELAY_MS 2000
 
 #define HEALTH_CHECK_PERIOD (HZ * 2)
 
index 6f225dd..4566e73 100644 (file)
@@ -687,11 +687,11 @@ err_clk_disable:
 
 static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
 {
-       /* Keep the clock and PM reference counts consistent. */
-       if (pm_runtime_status_suspended(rsb->dev))
-               pm_runtime_resume(rsb->dev);
        reset_control_assert(rsb->rstc);
-       clk_disable_unprepare(rsb->clk);
+
+       /* Keep the clock and PM reference counts consistent. */
+       if (!pm_runtime_status_suspended(rsb->dev))
+               clk_disable_unprepare(rsb->clk);
 }
 
 static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
index c837d54..c592651 100644 (file)
@@ -3031,7 +3031,7 @@ cleanup_bmc_device(struct kref *ref)
         * with removing the device attributes while reading a device
         * attribute.
         */
-       schedule_work(&bmc->remove_work);
+       queue_work(remove_work_wq, &bmc->remove_work);
 }
 
 /*
@@ -5392,22 +5392,27 @@ static int ipmi_init_msghandler(void)
        if (initialized)
                goto out;
 
-       init_srcu_struct(&ipmi_interfaces_srcu);
-
-       timer_setup(&ipmi_timer, ipmi_timeout, 0);
-       mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
-
-       atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+       rv = init_srcu_struct(&ipmi_interfaces_srcu);
+       if (rv)
+               goto out;
 
        remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
        if (!remove_work_wq) {
                pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
                rv = -ENOMEM;
-               goto out;
+               goto out_wq;
        }
 
+       timer_setup(&ipmi_timer, ipmi_timeout, 0);
+       mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+
+       atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
        initialized = true;
 
+out_wq:
+       if (rv)
+               cleanup_srcu_struct(&ipmi_interfaces_srcu);
 out:
        mutex_unlock(&ipmi_interfaces_mutex);
        return rv;
index 0c62e57..48aab77 100644 (file)
@@ -1659,6 +1659,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
                }
        }
 
+       ssif_info->client = client;
+       i2c_set_clientdata(client, ssif_info);
+
        rv = ssif_check_and_remove(client, ssif_info);
        /* If rv is 0 and addr source is not SI_ACPI, continue probing */
        if (!rv && ssif_info->addr_source == SI_ACPI) {
@@ -1679,9 +1682,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
                ipmi_addr_src_to_str(ssif_info->addr_source),
                client->addr, client->adapter->name, slave_addr);
 
-       ssif_info->client = client;
-       i2c_set_clientdata(client, ssif_info);
-
        /* Now check for system interface capabilities */
        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
        msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
@@ -1881,6 +1881,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
                dev_err(&ssif_info->client->dev,
                        "Unable to start IPMI SSIF: %d\n", rv);
+               i2c_set_clientdata(client, NULL);
                kfree(ssif_info);
        }
        kfree(resp);
index f467d63..566ee2c 100644 (file)
@@ -3418,6 +3418,14 @@ static int __clk_core_init(struct clk_core *core)
 
        clk_prepare_lock();
 
+       /*
+        * Set hw->core after grabbing the prepare_lock to synchronize with
+        * callers of clk_core_fill_parent_index() where we treat hw->core
+        * being NULL as the clk not being registered yet. This is crucial so
+        * that clks aren't parented until their parent is fully registered.
+        */
+       core->hw->core = core;
+
        ret = clk_pm_runtime_get(core);
        if (ret)
                goto unlock;
@@ -3582,8 +3590,10 @@ static int __clk_core_init(struct clk_core *core)
 out:
        clk_pm_runtime_put(core);
 unlock:
-       if (ret)
+       if (ret) {
                hlist_del_init(&core->child_node);
+               core->hw->core = NULL;
+       }
 
        clk_prepare_unlock();
 
@@ -3847,7 +3857,6 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
        core->num_parents = init->num_parents;
        core->min_rate = 0;
        core->max_rate = ULONG_MAX;
-       hw->core = core;
 
        ret = clk_core_populate_parent_map(core, init);
        if (ret)
@@ -3865,7 +3874,7 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
                goto fail_create_clk;
        }
 
-       clk_core_link_consumer(hw->core, hw->clk);
+       clk_core_link_consumer(core, hw->clk);
 
        ret = __clk_core_init(core);
        if (!ret)
index d3e905c..b237580 100644 (file)
@@ -370,7 +370,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
        .probe = imx8qxp_lpcg_clk_probe,
 };
 
-builtin_platform_driver(imx8qxp_lpcg_clk_driver);
+module_platform_driver(imx8qxp_lpcg_clk_driver);
 
 MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
 MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver");
index c53a688..40a2efb 100644 (file)
@@ -308,7 +308,7 @@ static struct platform_driver imx8qxp_clk_driver = {
        },
        .probe = imx8qxp_clk_probe,
 };
-builtin_platform_driver(imx8qxp_clk_driver);
+module_platform_driver(imx8qxp_clk_driver);
 
 MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
 MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
index eaedcce..8f65b9b 100644 (file)
@@ -1429,6 +1429,15 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
 void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
                             const struct alpha_pll_config *config)
 {
+       /*
+        * If the bootloader left the PLL enabled it's likely that there are
+        * RCGs that will lock up if we disable the PLL below.
+        */
+       if (trion_pll_is_enabled(pll, regmap)) {
+               pr_debug("Trion PLL is already enabled, skipping configuration\n");
+               return;
+       }
+
        clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
        regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
        clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
index b2d00b4..45d9cca 100644 (file)
@@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw)
        val &= mask;
 
        if (mux->parent_map)
-               return qcom_find_src_index(hw, mux->parent_map, val);
+               return qcom_find_cfg_index(hw, mux->parent_map, val);
 
        return val;
 }
index 0932e01..75f09e6 100644 (file)
@@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
 }
 EXPORT_SYMBOL_GPL(qcom_find_src_index);
 
+int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
+{
+       int i, num_parents = clk_hw_get_num_parents(hw);
+
+       for (i = 0; i < num_parents; i++)
+               if (cfg == map[i].cfg)
+                       return i;
+
+       return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
+
 struct regmap *
 qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
 {
index bb39a7e..9c8f7b7 100644 (file)
@@ -49,6 +49,8 @@ extern void
 qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
 extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
                               u8 src);
+extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
+                              u8 cfg);
 
 extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
                                      const char *name, unsigned long rate);
index 543cfab..431b55b 100644 (file)
@@ -1121,7 +1121,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
                .name = "gcc_sdcc1_apps_clk_src",
                .parent_data = gcc_parent_data_1,
                .num_parents = ARRAY_SIZE(gcc_parent_data_1),
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
@@ -1143,7 +1143,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
                .name = "gcc_sdcc1_ice_core_clk_src",
                .parent_data = gcc_parent_data_0,
                .num_parents = ARRAY_SIZE(gcc_parent_data_0),
-               .ops = &clk_rcg2_floor_ops,
+               .ops = &clk_rcg2_ops,
        },
 };
 
index d52f976..d5cb372 100644 (file)
@@ -543,8 +543,8 @@ static void __init of_syscon_icst_setup(struct device_node *np)
 
        regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype);
        if (IS_ERR(regclk)) {
-               kfree(name);
                pr_err("error setting up syscon ICST clock %s\n", name);
+               kfree(name);
                return;
        }
        of_clk_add_provider(np, of_clk_src_simple_get, regclk);
index 9a04eac..1ecd52f 100644 (file)
@@ -394,8 +394,13 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
 
 static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
 
-static void erratum_set_next_event_generic(const int access, unsigned long evt,
-                                               struct clock_event_device *clk)
+/*
+ * Force the inlining of this function so that the register accesses
+ * can be themselves correctly inlined.
+ */
+static __always_inline
+void erratum_set_next_event_generic(const int access, unsigned long evt,
+                                   struct clock_event_device *clk)
 {
        unsigned long ctrl;
        u64 cval;
index 3819ef5..3245eb0 100644 (file)
@@ -47,7 +47,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
                        pr_warn("pclk for %pOFn is present, but could not be activated\n",
                                np);
 
-       if (!of_property_read_u32(np, "clock-freq", rate) &&
+       if (!of_property_read_u32(np, "clock-freq", rate) ||
            !of_property_read_u32(np, "clock-frequency", rate))
                return 0;
 
index fa768f1..fd29861 100644 (file)
@@ -211,6 +211,12 @@ static u32 uof_get_ae_mask(u32 obj_num)
        return adf_4xxx_fw_config[obj_num].ae_mask;
 }
 
+static u32 get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+       /* For the moment do not report vf2pf sources */
+       return 0;
+}
+
 void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
 {
        hw_data->dev_class = &adf_4xxx_class;
@@ -254,6 +260,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
        hw_data->set_msix_rttable = set_msix_default_rttable;
        hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
        hw_data->enable_pfvf_comms = pfvf_comms_disabled;
+       hw_data->get_vf2pf_sources = get_vf2pf_sources;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
 
index cd0d745..33baf15 100644 (file)
@@ -373,7 +373,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
                                      struct axi_dma_desc *first)
 {
        u32 priority = chan->chip->dw->hdata->priority[chan->id];
-       struct axi_dma_chan_config config;
+       struct axi_dma_chan_config config = {};
        u32 irq_mask;
        u8 lms = 0; /* Select AXI0 master for LLI fetching */
 
@@ -391,7 +391,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
        config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
        config.prior = priority;
        config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
-       config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
+       config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
        switch (chan->direction) {
        case DMA_MEM_TO_DEV:
                dw_axi_dma_set_byte_halfword(chan, true);
index 198f6cd..cee7aa2 100644 (file)
@@ -187,17 +187,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 
        /* DMA configuration */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-       if (!err) {
+       if (err) {
                pci_err(pdev, "DMA mask 64 set failed\n");
                return err;
-       } else {
-               pci_err(pdev, "DMA mask 64 set failed\n");
-
-               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-               if (err) {
-                       pci_err(pdev, "DMA mask 32 set failed\n");
-                       return err;
-               }
        }
 
        /* Data structure allocation */
index 17f2f8a..cf2c8bc 100644 (file)
@@ -137,10 +137,10 @@ halt:
                        INIT_WORK(&idxd->work, idxd_device_reinit);
                        queue_work(idxd->wq, &idxd->work);
                } else {
-                       spin_lock(&idxd->dev_lock);
                        idxd->state = IDXD_DEV_HALTED;
                        idxd_wqs_quiesce(idxd);
                        idxd_wqs_unmap_portal(idxd);
+                       spin_lock(&idxd->dev_lock);
                        idxd_device_clear_state(idxd);
                        dev_err(&idxd->pdev->dev,
                                "idxd halted, need %s.\n",
index de76fb4..83452fb 100644 (file)
@@ -106,6 +106,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 {
        struct idxd_desc *d, *t, *found = NULL;
        struct llist_node *head;
+       LIST_HEAD(flist);
 
        desc->completion->status = IDXD_COMP_DESC_ABORT;
        /*
@@ -120,7 +121,11 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
                                found = desc;
                                continue;
                        }
-                       list_add_tail(&desc->list, &ie->work_list);
+
+                       if (d->completion->status)
+                               list_add_tail(&d->list, &flist);
+                       else
+                               list_add_tail(&d->list, &ie->work_list);
                }
        }
 
@@ -130,6 +135,17 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 
        if (found)
                complete_desc(found, IDXD_COMPLETE_ABORT);
+
+       /*
+        * complete_desc() will return desc to allocator and the desc can be
+        * acquired by a different process and the desc->list can be modified.
+        * Delete desc from list so the list trasversing does not get corrupted
+        * by the other process.
+        */
+       list_for_each_entry_safe(d, t, &flist, list) {
+               list_del_init(&d->list);
+               complete_desc(d, IDXD_COMPLETE_NORMAL);
+       }
 }
 
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
index 962b6e0..d95c421 100644 (file)
@@ -874,4 +874,4 @@ MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
 MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
 MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
-MODULE_ALIAS("platform: " DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
index 041d8e3..6e56d1c 100644 (file)
@@ -4534,45 +4534,60 @@ static int udma_setup_resources(struct udma_dev *ud)
        rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
        if (IS_ERR(rm_res)) {
                bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+               irq_res.sets = 1;
        } else {
                bitmap_fill(ud->tchan_map, ud->tchan_cnt);
                for (i = 0; i < rm_res->sets; i++)
                        udma_mark_resource_ranges(ud, ud->tchan_map,
                                                  &rm_res->desc[i], "tchan");
+               irq_res.sets = rm_res->sets;
        }
-       irq_res.sets = rm_res->sets;
 
        /* rchan and matching default flow ranges */
        rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
        if (IS_ERR(rm_res)) {
                bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+               irq_res.sets++;
        } else {
                bitmap_fill(ud->rchan_map, ud->rchan_cnt);
                for (i = 0; i < rm_res->sets; i++)
                        udma_mark_resource_ranges(ud, ud->rchan_map,
                                                  &rm_res->desc[i], "rchan");
+               irq_res.sets += rm_res->sets;
        }
 
-       irq_res.sets += rm_res->sets;
        irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+       if (!irq_res.desc)
+               return -ENOMEM;
        rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
-       for (i = 0; i < rm_res->sets; i++) {
-               irq_res.desc[i].start = rm_res->desc[i].start;
-               irq_res.desc[i].num = rm_res->desc[i].num;
-               irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
-               irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+       if (IS_ERR(rm_res)) {
+               irq_res.desc[0].start = 0;
+               irq_res.desc[0].num = ud->tchan_cnt;
+               i = 1;
+       } else {
+               for (i = 0; i < rm_res->sets; i++) {
+                       irq_res.desc[i].start = rm_res->desc[i].start;
+                       irq_res.desc[i].num = rm_res->desc[i].num;
+                       irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
+                       irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+               }
        }
        rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
-       for (j = 0; j < rm_res->sets; j++, i++) {
-               if (rm_res->desc[j].num) {
-                       irq_res.desc[i].start = rm_res->desc[j].start +
-                                       ud->soc_data->oes.udma_rchan;
-                       irq_res.desc[i].num = rm_res->desc[j].num;
-               }
-               if (rm_res->desc[j].num_sec) {
-                       irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
-                                       ud->soc_data->oes.udma_rchan;
-                       irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+       if (IS_ERR(rm_res)) {
+               irq_res.desc[i].start = 0;
+               irq_res.desc[i].num = ud->rchan_cnt;
+       } else {
+               for (j = 0; j < rm_res->sets; j++, i++) {
+                       if (rm_res->desc[j].num) {
+                               irq_res.desc[i].start = rm_res->desc[j].start +
+                                               ud->soc_data->oes.udma_rchan;
+                               irq_res.desc[i].num = rm_res->desc[j].num;
+                       }
+                       if (rm_res->desc[j].num_sec) {
+                               irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+                                               ud->soc_data->oes.udma_rchan;
+                               irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+                       }
                }
        }
        ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -4690,14 +4705,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
                rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
                if (IS_ERR(rm_res)) {
                        bitmap_zero(ud->bchan_map, ud->bchan_cnt);
+                       irq_res.sets++;
                } else {
                        bitmap_fill(ud->bchan_map, ud->bchan_cnt);
                        for (i = 0; i < rm_res->sets; i++)
                                udma_mark_resource_ranges(ud, ud->bchan_map,
                                                          &rm_res->desc[i],
                                                          "bchan");
+                       irq_res.sets += rm_res->sets;
                }
-               irq_res.sets += rm_res->sets;
        }
 
        /* tchan ranges */
@@ -4705,14 +4721,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
                rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
                if (IS_ERR(rm_res)) {
                        bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+                       irq_res.sets += 2;
                } else {
                        bitmap_fill(ud->tchan_map, ud->tchan_cnt);
                        for (i = 0; i < rm_res->sets; i++)
                                udma_mark_resource_ranges(ud, ud->tchan_map,
                                                          &rm_res->desc[i],
                                                          "tchan");
+                       irq_res.sets += rm_res->sets * 2;
                }
-               irq_res.sets += rm_res->sets * 2;
        }
 
        /* rchan ranges */
@@ -4720,47 +4737,72 @@ static int bcdma_setup_resources(struct udma_dev *ud)
                rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
                if (IS_ERR(rm_res)) {
                        bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+                       irq_res.sets += 2;
                } else {
                        bitmap_fill(ud->rchan_map, ud->rchan_cnt);
                        for (i = 0; i < rm_res->sets; i++)
                                udma_mark_resource_ranges(ud, ud->rchan_map,
                                                          &rm_res->desc[i],
                                                          "rchan");
+                       irq_res.sets += rm_res->sets * 2;
                }
-               irq_res.sets += rm_res->sets * 2;
        }
 
        irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+       if (!irq_res.desc)
+               return -ENOMEM;
        if (ud->bchan_cnt) {
                rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
-               for (i = 0; i < rm_res->sets; i++) {
-                       irq_res.desc[i].start = rm_res->desc[i].start +
-                                               oes->bcdma_bchan_ring;
-                       irq_res.desc[i].num = rm_res->desc[i].num;
+               if (IS_ERR(rm_res)) {
+                       irq_res.desc[0].start = oes->bcdma_bchan_ring;
+                       irq_res.desc[0].num = ud->bchan_cnt;
+                       i = 1;
+               } else {
+                       for (i = 0; i < rm_res->sets; i++) {
+                               irq_res.desc[i].start = rm_res->desc[i].start +
+                                                       oes->bcdma_bchan_ring;
+                               irq_res.desc[i].num = rm_res->desc[i].num;
+                       }
                }
        }
        if (ud->tchan_cnt) {
                rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
-               for (j = 0; j < rm_res->sets; j++, i += 2) {
-                       irq_res.desc[i].start = rm_res->desc[j].start +
-                                               oes->bcdma_tchan_data;
-                       irq_res.desc[i].num = rm_res->desc[j].num;
-
-                       irq_res.desc[i + 1].start = rm_res->desc[j].start +
-                                               oes->bcdma_tchan_ring;
-                       irq_res.desc[i + 1].num = rm_res->desc[j].num;
+               if (IS_ERR(rm_res)) {
+                       irq_res.desc[i].start = oes->bcdma_tchan_data;
+                       irq_res.desc[i].num = ud->tchan_cnt;
+                       irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
+                       irq_res.desc[i + 1].num = ud->tchan_cnt;
+                       i += 2;
+               } else {
+                       for (j = 0; j < rm_res->sets; j++, i += 2) {
+                               irq_res.desc[i].start = rm_res->desc[j].start +
+                                                       oes->bcdma_tchan_data;
+                               irq_res.desc[i].num = rm_res->desc[j].num;
+
+                               irq_res.desc[i + 1].start = rm_res->desc[j].start +
+                                                       oes->bcdma_tchan_ring;
+                               irq_res.desc[i + 1].num = rm_res->desc[j].num;
+                       }
                }
        }
        if (ud->rchan_cnt) {
                rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
-               for (j = 0; j < rm_res->sets; j++, i += 2) {
-                       irq_res.desc[i].start = rm_res->desc[j].start +
-                                               oes->bcdma_rchan_data;
-                       irq_res.desc[i].num = rm_res->desc[j].num;
-
-                       irq_res.desc[i + 1].start = rm_res->desc[j].start +
-                                               oes->bcdma_rchan_ring;
-                       irq_res.desc[i + 1].num = rm_res->desc[j].num;
+               if (IS_ERR(rm_res)) {
+                       irq_res.desc[i].start = oes->bcdma_rchan_data;
+                       irq_res.desc[i].num = ud->rchan_cnt;
+                       irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
+                       irq_res.desc[i + 1].num = ud->rchan_cnt;
+                       i += 2;
+               } else {
+                       for (j = 0; j < rm_res->sets; j++, i += 2) {
+                               irq_res.desc[i].start = rm_res->desc[j].start +
+                                                       oes->bcdma_rchan_data;
+                               irq_res.desc[i].num = rm_res->desc[j].num;
+
+                               irq_res.desc[i + 1].start = rm_res->desc[j].start +
+                                                       oes->bcdma_rchan_ring;
+                               irq_res.desc[i + 1].num = rm_res->desc[j].num;
+                       }
                }
        }
 
@@ -4858,39 +4900,54 @@ static int pktdma_setup_resources(struct udma_dev *ud)
        if (IS_ERR(rm_res)) {
                /* all rflows are assigned exclusively to Linux */
                bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
+               irq_res.sets = 1;
        } else {
                bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
                for (i = 0; i < rm_res->sets; i++)
                        udma_mark_resource_ranges(ud, ud->rflow_in_use,
                                                  &rm_res->desc[i], "rflow");
+               irq_res.sets = rm_res->sets;
        }
-       irq_res.sets = rm_res->sets;
 
        /* tflow ranges */
        rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
        if (IS_ERR(rm_res)) {
                /* all tflows are assigned exclusively to Linux */
                bitmap_zero(ud->tflow_map, ud->tflow_cnt);
+               irq_res.sets++;
        } else {
                bitmap_fill(ud->tflow_map, ud->tflow_cnt);
                for (i = 0; i < rm_res->sets; i++)
                        udma_mark_resource_ranges(ud, ud->tflow_map,
                                                  &rm_res->desc[i], "tflow");
+               irq_res.sets += rm_res->sets;
        }
-       irq_res.sets += rm_res->sets;
 
        irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+       if (!irq_res.desc)
+               return -ENOMEM;
        rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
-       for (i = 0; i < rm_res->sets; i++) {
-               irq_res.desc[i].start = rm_res->desc[i].start +
-                                       oes->pktdma_tchan_flow;
-               irq_res.desc[i].num = rm_res->desc[i].num;
+       if (IS_ERR(rm_res)) {
+               irq_res.desc[0].start = oes->pktdma_tchan_flow;
+               irq_res.desc[0].num = ud->tflow_cnt;
+               i = 1;
+       } else {
+               for (i = 0; i < rm_res->sets; i++) {
+                       irq_res.desc[i].start = rm_res->desc[i].start +
+                                               oes->pktdma_tchan_flow;
+                       irq_res.desc[i].num = rm_res->desc[i].num;
+               }
        }
        rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
-       for (j = 0; j < rm_res->sets; j++, i++) {
-               irq_res.desc[i].start = rm_res->desc[j].start +
-                                       oes->pktdma_rchan_flow;
-               irq_res.desc[i].num = rm_res->desc[j].num;
+       if (IS_ERR(rm_res)) {
+               irq_res.desc[i].start = oes->pktdma_rchan_flow;
+               irq_res.desc[i].num = ud->rflow_cnt;
+       } else {
+               for (j = 0; j < rm_res->sets; j++, i++) {
+                       irq_res.desc[i].start = rm_res->desc[j].start +
+                                               oes->pktdma_rchan_flow;
+                       irq_res.desc[i].num = rm_res->desc[j].num;
+               }
        }
        ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
        kfree(irq_res.desc);
index 83345bf..6cf50ee 100644 (file)
@@ -358,6 +358,9 @@ static int i10nm_get_hbm_munits(void)
 
                        mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE);
                        if (!mbase) {
+                               pci_dev_put(d->imc[lmc].mdev);
+                               d->imc[lmc].mdev = NULL;
+
                                i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n",
                                             base + off);
                                return -ENOMEM;
@@ -368,6 +371,12 @@ static int i10nm_get_hbm_munits(void)
 
                        mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0);
                        if (!I10NM_IS_HBM_IMC(mcmtr)) {
+                               iounmap(d->imc[lmc].mbase);
+                               d->imc[lmc].mbase = NULL;
+                               d->imc[lmc].hbm_mc = false;
+                               pci_dev_put(d->imc[lmc].mdev);
+                               d->imc[lmc].mdev = NULL;
+
                                i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n");
                                return -ENODEV;
                        }
index 5120160..8006739 100644 (file)
@@ -16,7 +16,6 @@ struct scpi_pm_domain {
        struct generic_pm_domain genpd;
        struct scpi_ops *ops;
        u32 domain;
-       char name[30];
 };
 
 /*
@@ -110,8 +109,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev)
 
                scpi_pd->domain = i;
                scpi_pd->ops = scpi_ops;
-               sprintf(scpi_pd->name, "%pOFn.%d", np, i);
-               scpi_pd->genpd.name = scpi_pd->name;
+               scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL,
+                                                    "%pOFn.%d", np, i);
+               if (!scpi_pd->genpd.name) {
+                       dev_err(dev, "Failed to allocate genpd name:%pOFn.%d\n",
+                               np, i);
+                       continue;
+               }
                scpi_pd->genpd.power_off = scpi_pd_power_off;
                scpi_pd->genpd.power_on = scpi_pd_power_on;
 
index 6d66fe0..fd89899 100644 (file)
@@ -77,13 +77,14 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
        const char *root_path, *filename = NULL;
        char *root_path_buf;
        size_t root_len;
+       size_t root_path_buf_len = 512;
 
-       root_path_buf = kzalloc(512, GFP_KERNEL);
+       root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
        if (!root_path_buf)
                goto out;
 
        root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
-                               sizeof(root_path_buf));
+                               root_path_buf_len);
        if (IS_ERR(root_path))
                goto out;
 
index 3d6ef37..b3a9b84 100644 (file)
@@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
                reg = ioread32(bank_reg(data, bank, reg_irq_status));
 
                for_each_set_bit(p, &reg, 32)
-                       generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
+                       generic_handle_domain_irq(gc->irq.domain, (i * 32 + p) * 2);
        }
 
        chained_irq_exit(ic, desc);
index 026903e..08b9e2c 100644 (file)
@@ -46,6 +46,7 @@
 struct dln2_gpio {
        struct platform_device *pdev;
        struct gpio_chip gpio;
+       struct irq_chip irqchip;
 
        /*
         * Cache pin direction to save us one transfer, since the hardware has
@@ -383,15 +384,6 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd)
        mutex_unlock(&dln2->irq_lock);
 }
 
-static struct irq_chip dln2_gpio_irqchip = {
-       .name = "dln2-irq",
-       .irq_mask = dln2_irq_mask,
-       .irq_unmask = dln2_irq_unmask,
-       .irq_set_type = dln2_irq_set_type,
-       .irq_bus_lock = dln2_irq_bus_lock,
-       .irq_bus_sync_unlock = dln2_irq_bus_unlock,
-};
-
 static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                            const void *data, int len)
 {
@@ -473,8 +465,15 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        dln2->gpio.direction_output = dln2_gpio_direction_output;
        dln2->gpio.set_config = dln2_gpio_set_config;
 
+       dln2->irqchip.name = "dln2-irq",
+       dln2->irqchip.irq_mask = dln2_irq_mask,
+       dln2->irqchip.irq_unmask = dln2_irq_unmask,
+       dln2->irqchip.irq_set_type = dln2_irq_set_type,
+       dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock,
+       dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock,
+
        girq = &dln2->gpio.irq;
-       girq->chip = &dln2_gpio_irqchip;
+       girq->chip = &dln2->irqchip;
        /* The event comes from the outside so no parent handler */
        girq->parent_handler = NULL;
        girq->num_parents = 0;
index 84f96b7..9f4941b 100644 (file)
@@ -100,11 +100,7 @@ static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
        virtqueue_kick(vgpio->request_vq);
        mutex_unlock(&vgpio->lock);
 
-       if (!wait_for_completion_timeout(&line->completion, HZ)) {
-               dev_err(dev, "GPIO operation timed out\n");
-               ret = -ETIMEDOUT;
-               goto out;
-       }
+       wait_for_completion(&line->completion);
 
        if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) {
                dev_err(dev, "GPIO request failed: %d\n", gpio);
index b85b67a..7d67aec 100644 (file)
@@ -1077,6 +1077,7 @@ struct amdgpu_device {
        bool                            runpm;
        bool                            in_runpm;
        bool                            has_pr3;
+       bool                            is_fw_fb;
 
        bool                            pm_sysfs_en;
        bool                            ucode_sysfs_en;
index 1e651b9..694c372 100644 (file)
@@ -3166,6 +3166,12 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 {
        switch (asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+       case CHIP_HAINAN:
+#endif
+       case CHIP_TOPAZ:
+               /* chips with no display hardware */
+               return false;
 #if defined(CONFIG_DRM_AMD_DC)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
@@ -4461,7 +4467,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
                                 struct amdgpu_reset_context *reset_context)
 {
-       int i, j, r = 0;
+       int i, r = 0;
        struct amdgpu_job *job = NULL;
        bool need_full_reset =
                test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4483,15 +4489,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
                /*clear job fence from fence drv to avoid force_completion
                 *leave NULL and vm flush fence in fence drv */
-               for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
-                       struct dma_fence *old, **ptr;
+               amdgpu_fence_driver_clear_job_fences(ring);
 
-                       ptr = &ring->fence_drv.fences[j];
-                       old = rcu_dereference_protected(*ptr, 1);
-                       if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
-                               RCU_INIT_POINTER(*ptr, NULL);
-                       }
-               }
                /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
                amdgpu_fence_driver_force_completion(ring);
        }
index ea00090..bcc9343 100644 (file)
@@ -526,10 +526,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
        }
 }
 
+union gc_info {
+       struct gc_info_v1_0 v1;
+       struct gc_info_v2_0 v2;
+};
+
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 {
        struct binary_header *bhdr;
-       struct gc_info_v1_0 *gc_info;
+       union gc_info *gc_info;
 
        if (!adev->mman.discovery_bin) {
                DRM_ERROR("ip discovery uninitialized\n");
@@ -537,28 +542,55 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
        }
 
        bhdr = (struct binary_header *)adev->mman.discovery_bin;
-       gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
+       gc_info = (union gc_info *)(adev->mman.discovery_bin +
                        le16_to_cpu(bhdr->table_list[GC].offset));
-
-       adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
-       adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
-                                             le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
-       adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
-       adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
-       adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
-       adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
-       adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
-       adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
-       adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
-       adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
-       adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
-       adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
-       adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
-       adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
-       adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
-                                        le32_to_cpu(gc_info->gc_num_sa_per_se);
-       adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);
-
+       switch (gc_info->v1.header.version_major) {
+       case 1:
+               adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+               adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+                                                     le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
+               adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+               adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+               adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+               adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+               adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+               adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+               adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+               adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+               adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+               adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+               adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+               adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+               adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+                       le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+               adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+               break;
+       case 2:
+               adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+               adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+               adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+               adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+               adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+               adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+               adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+               adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+               adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+               adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+               adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+               adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+               adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+               adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+               adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+                       le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+               adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+               break;
+       default:
+               dev_err(adev->dev,
+                       "Unhandled GC info table %d.%d\n",
+                       gc_info->v1.header.version_major,
+                       gc_info->v1.header.version_minor);
+               return -EINVAL;
+       }
        return 0;
 }
 
index ae6ab93..7444484 100644 (file)
@@ -384,7 +384,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
        struct amdgpu_vm_bo_base *bo_base;
        int r;
 
-       if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
+       if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
                return;
 
        r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
index ad95de6..99370bd 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/suspend.h>
 #include <linux/cc_platform.h>
+#include <linux/fb.h>
 
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
@@ -328,10 +329,11 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
 
 /**
  * DOC: runpm (int)
- * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
- * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
+ * the dGPUs when they are idle if supported. The default is -1 (auto enable).
+ * Setting the value to 0 disables this functionality.
  */
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = PX only default)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)");
 module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
 
 /**
@@ -1889,6 +1891,26 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static const struct drm_driver amdgpu_kms_driver;
 
+static bool amdgpu_is_fw_framebuffer(resource_size_t base,
+                                    resource_size_t size)
+{
+       bool found = false;
+#if IS_REACHABLE(CONFIG_FB)
+       struct apertures_struct *a;
+
+       a = alloc_apertures(1);
+       if (!a)
+               return false;
+
+       a->ranges[0].base = base;
+       a->ranges[0].size = size;
+
+       found = is_firmware_framebuffer(a);
+       kfree(a);
+#endif
+       return found;
+}
+
 static int amdgpu_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
 {
@@ -1897,6 +1919,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
        unsigned long flags = ent->driver_data;
        int ret, retry = 0, i;
        bool supports_atomic = false;
+       bool is_fw_fb;
+       resource_size_t base, size;
 
        /* skip devices which are owned by radeon */
        for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
@@ -1965,6 +1989,10 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
        }
 #endif
 
+       base = pci_resource_start(pdev, 0);
+       size = pci_resource_len(pdev, 0);
+       is_fw_fb = amdgpu_is_fw_framebuffer(base, size);
+
        /* Get rid of things like offb */
        ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
        if (ret)
@@ -1977,6 +2005,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
        adev->dev  = &pdev->dev;
        adev->pdev = pdev;
        ddev = adev_to_drm(adev);
+       adev->is_fw_fb = is_fw_fb;
 
        if (!supports_atomic)
                ddev->driver_features &= ~DRIVER_ATOMIC;
@@ -2153,7 +2182,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
        adev->in_s3 = true;
        r = amdgpu_device_suspend(drm_dev, true);
        adev->in_s3 = false;
-
+       if (r)
+               return r;
+       if (!adev->in_s0ix)
+               r = amdgpu_asic_reset(adev);
        return r;
 }
 
@@ -2234,12 +2266,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
        if (amdgpu_device_supports_px(drm_dev))
                drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
+       /*
+        * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some
+        * proper cleanups and put itself into a state ready for PNP. That
+        * can address some random resuming failure observed on BOCO capable
+        * platforms.
+        * TODO: this may be also needed for PX capable platform.
+        */
+       if (amdgpu_device_supports_boco(drm_dev))
+               adev->mp1_state = PP_MP1_STATE_UNLOAD;
+
        ret = amdgpu_device_suspend(drm_dev, false);
        if (ret) {
                adev->in_runpm = false;
+               if (amdgpu_device_supports_boco(drm_dev))
+                       adev->mp1_state = PP_MP1_STATE_NONE;
                return ret;
        }
 
+       if (amdgpu_device_supports_boco(drm_dev))
+               adev->mp1_state = PP_MP1_STATE_NONE;
+
        if (amdgpu_device_supports_px(drm_dev)) {
                /* Only need to handle PCI state in the driver for ATPX
                 * PCI core handles it for _PR3.
index 3b7e86e..9afd11c 100644 (file)
@@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void)
  * Cast helper
  */
 static const struct dma_fence_ops amdgpu_fence_ops;
+static const struct dma_fence_ops amdgpu_job_fence_ops;
 static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
        struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 
-       if (__f->base.ops == &amdgpu_fence_ops)
+       if (__f->base.ops == &amdgpu_fence_ops ||
+           __f->base.ops == &amdgpu_job_fence_ops)
                return __f;
 
        return NULL;
@@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
        }
 
        seq = ++ring->fence_drv.sync_seq;
-       if (job != NULL && job->job_run_counter) {
+       if (job && job->job_run_counter) {
                /* reinit seq for resubmitted jobs */
                fence->seqno = seq;
        } else {
-               dma_fence_init(fence, &amdgpu_fence_ops,
-                               &ring->fence_drv.lock,
-                               adev->fence_context + ring->idx,
-                               seq);
-       }
-
-       if (job != NULL) {
-               /* mark this fence has a parent job */
-               set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+               if (job)
+                       dma_fence_init(fence, &amdgpu_job_fence_ops,
+                                      &ring->fence_drv.lock,
+                                      adev->fence_context + ring->idx, seq);
+               else
+                       dma_fence_init(fence, &amdgpu_fence_ops,
+                                      &ring->fence_drv.lock,
+                                      adev->fence_context + ring->idx, seq);
        }
 
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
@@ -620,6 +621,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
        }
 }
 
+/**
+ * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
+ *
+ * @ring: ring whose job-embedded fences will be cleared
+ *
+ */
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+{
+       int i;
+       struct dma_fence *old, **ptr;
+
+       for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
+               ptr = &ring->fence_drv.fences[i];
+               old = rcu_dereference_protected(*ptr, 1);
+               if (old && old->ops == &amdgpu_job_fence_ops)
+                       RCU_INIT_POINTER(*ptr, NULL);
+       }
+}
+
 /**
  * amdgpu_fence_driver_force_completion - force signal latest fence of ring
  *
@@ -643,16 +663,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
-       struct amdgpu_ring *ring;
+       return (const char *)to_amdgpu_fence(f)->ring->name;
+}
 
-       if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-               struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+{
+       struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
 
-               ring = to_amdgpu_ring(job->base.sched);
-       } else {
-               ring = to_amdgpu_fence(f)->ring;
-       }
-       return (const char *)ring->name;
+       return (const char *)to_amdgpu_ring(job->base.sched)->name;
 }
 
 /**
@@ -665,18 +683,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
  */
 static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
-       struct amdgpu_ring *ring;
+       if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
+               amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
 
-       if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-               struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+       return true;
+}
 
-               ring = to_amdgpu_ring(job->base.sched);
-       } else {
-               ring = to_amdgpu_fence(f)->ring;
-       }
+/**
+ * amdgpu_job_fence_enable_signaling - enable signaling on job fence
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_enable_signaling above; it
+ * only handles the job embedded fence.
+ */
+static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+{
+       struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
 
-       if (!timer_pending(&ring->fence_drv.fallback_timer))
-               amdgpu_fence_schedule_fallback(ring);
+       if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+               amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
 
        return true;
 }
@@ -692,19 +717,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
 {
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 
-       if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
-       /* free job if fence has a parent job */
-               struct amdgpu_job *job;
-
-               job = container_of(f, struct amdgpu_job, hw_fence);
-               kfree(job);
-       } else {
        /* free fence_slab if it's separated fence*/
-               struct amdgpu_fence *fence;
+       kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+}
 
-               fence = to_amdgpu_fence(f);
-               kmem_cache_free(amdgpu_fence_slab, fence);
-       }
+/**
+ * amdgpu_job_fence_free - free up the job with embedded fence
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the job with embedded fence after the RCU grace period.
+ */
+static void amdgpu_job_fence_free(struct rcu_head *rcu)
+{
+       struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+
+       /* free job if fence has a parent job */
+       kfree(container_of(f, struct amdgpu_job, hw_fence));
 }
 
 /**
@@ -720,6 +749,19 @@ static void amdgpu_fence_release(struct dma_fence *f)
        call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
+/**
+ * amdgpu_job_fence_release - callback that job embedded fence can be freed
+ *
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_release above; it
+ * only handles the job embedded fence.
+ */
+static void amdgpu_job_fence_release(struct dma_fence *f)
+{
+       call_rcu(&f->rcu, amdgpu_job_fence_free);
+}
+
 static const struct dma_fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -727,6 +769,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
        .release = amdgpu_fence_release,
 };
 
+static const struct dma_fence_ops amdgpu_job_fence_ops = {
+       .get_driver_name = amdgpu_fence_get_driver_name,
+       .get_timeline_name = amdgpu_job_fence_get_timeline_name,
+       .enable_signaling = amdgpu_job_fence_enable_signaling,
+       .release = amdgpu_job_fence_release,
+};
 
 /*
  * Fence debugfs
index 651c7ab..09ad179 100644 (file)
@@ -206,6 +206,12 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
                        adev->runpm = true;
                        break;
                }
+               /* XXX: disable runtime pm if we are the primary adapter
+                * to avoid displays being re-enabled after DPMS.
+                * This needs to be sorted out and fixed properly.
+                */
+               if (adev->is_fw_fb)
+                       adev->runpm = false;
                if (adev->runpm)
                        dev_info(adev->dev, "Using BACO for runtime pm\n");
        }
index 4d380e7..fae7d18 100644 (file)
@@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level {
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)
 
-/* fence flag bit to indicate the face is embedded in job*/
-#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT             (DMA_FENCE_FLAG_USER_BITS + 1)
-
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
 #define AMDGPU_IB_POOL_SIZE    (1024 * 1024)
@@ -114,6 +111,7 @@ struct amdgpu_fence_driver {
        struct dma_fence                **fences;
 };
 
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
index b305fd3..edb3e3b 100644 (file)
@@ -3070,8 +3070,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
                              AMD_PG_SUPPORT_CP |
                              AMD_PG_SUPPORT_GDS |
                              AMD_PG_SUPPORT_RLC_SMU_HS)) {
-               WREG32(mmRLC_JUMP_TABLE_RESTORE,
-                      adev->gfx.rlc.cp_table_gpu_addr >> 8);
+               WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
+                            adev->gfx.rlc.cp_table_gpu_addr >> 8);
                gfx_v9_0_init_gfx_power_gating(adev);
        }
 }
index 480e418..ec4d5e1 100644 (file)
@@ -162,7 +162,6 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC);/* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
index 14c1c1a..6e0ace2 100644 (file)
@@ -196,7 +196,6 @@ static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* UC, uncached */
 
index e80d1dc..b4eddf6 100644 (file)
@@ -197,7 +197,6 @@ static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* UC, uncached */
 
index cb82404..d84523c 100644 (file)
@@ -1808,6 +1808,14 @@ static int gmc_v9_0_hw_fini(void *handle)
                return 0;
        }
 
+       /*
+        * Pair the operations did in gmc_v9_0_hw_init and thus maintain
+        * a correct cached state for GMC. Otherwise, the "gate" again
+        * operation on S3 resuming will fail due to wrong cached state.
+        */
+       if (adev->mmhub.funcs->update_power_gating)
+               adev->mmhub.funcs->update_power_gating(adev, false);
+
        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 
index a999538..1da2ec6 100644 (file)
@@ -145,7 +145,6 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC);/* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
@@ -302,10 +301,10 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
        if (amdgpu_sriov_vf(adev))
                return;
 
-       if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
-               amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
-
-       }
+       if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
+               amdgpu_dpm_set_powergating_by_smu(adev,
+                                                 AMD_IP_BLOCK_TYPE_GMC,
+                                                 enable);
 }
 
 static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
index f80a14a..f5f7181 100644 (file)
@@ -165,7 +165,6 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC);/* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
index 25f8e93..3718ff6 100644 (file)
@@ -267,7 +267,6 @@ static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* UC, uncached */
 
index a11d60e..9e16da2 100644 (file)
@@ -194,7 +194,6 @@ static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* UC, uncached */
 
index c4ef822..ff49eea 100644 (file)
@@ -189,8 +189,6 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
-                           ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC);/* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
index d54d720..3799226 100644 (file)
@@ -246,6 +246,13 @@ static int vcn_v1_0_suspend(void *handle)
 {
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       bool idle_work_unexecuted;
+
+       idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
+       if (idle_work_unexecuted) {
+               if (adev->pm.dpm_enabled)
+                       amdgpu_dpm_enable_uvd(adev, false);
+       }
 
        r = vcn_v1_0_hw_fini(adev);
        if (r)
index 1cd6b9f..e727f1d 100644 (file)
@@ -1051,6 +1051,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
                return 0;
        }
 
+       /* Reset DMCUB if it was previously running - before we overwrite its memory. */
+       status = dmub_srv_hw_reset(dmub_srv);
+       if (status != DMUB_STATUS_OK)
+               DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
 
        fw_inst_const = dmub_fw->data +
@@ -2576,7 +2581,8 @@ static int dm_resume(void *handle)
                 */
                link_enc_cfg_init(dm->dc, dc_state);
 
-               amdgpu_dm_outbox_init(adev);
+               if (dc_enable_dmub_notifications(adev->dm.dc))
+                       amdgpu_dm_outbox_init(adev);
 
                r = dm_dmub_hw_init(adev);
                if (r)
@@ -2625,6 +2631,10 @@ static int dm_resume(void *handle)
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);
 
+       /* Re-enable outbox interrupts for DPIA. */
+       if (dc_enable_dmub_notifications(adev->dm.dc))
+               amdgpu_dm_outbox_init(adev);
+
        /* Before powering on DC we need to re-initialize DMUB. */
        r = dm_dmub_hw_init(adev);
        if (r)
index f4c9a45..9df38e2 100644 (file)
@@ -158,6 +158,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
                                union display_idle_optimization_u idle_info = { 0 };
                                idle_info.idle_info.df_request_disabled = 1;
                                idle_info.idle_info.phy_ref_clk_off = 1;
+                               idle_info.idle_info.s0i2_rdy = 1;
                                dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
                                /* update power state */
                                clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
index c8457ba..c0bdc23 100644 (file)
@@ -3945,12 +3945,9 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
                config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
                config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
-               
+
                if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
                                pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
-                       link_enc = pipe_ctx->stream->link->link_enc;
-                       config.dio_output_type = pipe_ctx->stream->link->ep_type;
-                       config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
                        if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
                                link_enc = pipe_ctx->stream->link->link_enc;
                        else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
index b01077a..fad3d88 100644 (file)
@@ -226,6 +226,8 @@ static inline void get_edp_links(const struct dc *dc,
        *edp_num = 0;
        for (i = 0; i < dc->link_count; i++) {
                // report any eDP links, even unconnected DDI's
+               if (!dc->links[i])
+                       continue;
                if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
                        edp_links[*edp_num] = dc->links[i];
                        if (++(*edp_num) == MAX_NUM_EDP)
index 34001a3..10e613e 100644 (file)
@@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .get_clock = dcn10_get_clock,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
        .calc_vupdate_position = dcn10_calc_vupdate_position,
+       .power_down = dce110_power_down,
        .set_backlight_level = dce110_set_backlight_level,
        .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
        .set_pipe = dce110_set_pipe,
index 3883f91..83f5d9a 100644 (file)
@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .timing_trace = false,
                .clock_trace = true,
                .disable_pplib_clock_request = true,
-               .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
                .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .vsr_support = true,
index 0fa3810..faec029 100644 (file)
@@ -603,7 +603,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .timing_trace = false,
                .clock_trace = true,
                .disable_pplib_clock_request = true,
-               .pipe_split_policy = MPC_SPLIT_AVOID,
+               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
                .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .vsr_support = true,
index d452a0d..79313d1 100644 (file)
@@ -874,7 +874,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .clock_trace = true,
                .disable_pplib_clock_request = true,
                .min_disp_clk_khz = 100000,
-               .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
                .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .vsr_support = true,
index 79a66e0..98852b5 100644 (file)
@@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .timing_trace = false,
        .clock_trace = true,
        .disable_pplib_clock_request = true,
-       .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+       .pipe_split_policy = MPC_SPLIT_DYNAMIC,
        .force_single_disp_pipe_split = false,
        .disable_dcc = DCC_ENABLE,
        .vsr_support = true,
index fbaa03f..e472b72 100644 (file)
@@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .disable_clock_gate = true,
        .disable_pplib_clock_request = true,
        .disable_pplib_wm_range = true,
-       .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+       .pipe_split_policy = MPC_SPLIT_DYNAMIC,
        .force_single_disp_pipe_split = false,
        .disable_dcc = DCC_ENABLE,
        .vsr_support = true,
index fcf96cf..16e7059 100644 (file)
@@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .timing_trace = false,
                .clock_trace = true,
                .disable_pplib_clock_request = true,
-               .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
                .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .vsr_support = true,
index 4a9b640..87cec14 100644 (file)
@@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .timing_trace = false,
                .clock_trace = true,
                .disable_pplib_clock_request = true,
-               .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
                .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .vsr_support = true,
index 05335a8..17e2f2b 100644 (file)
@@ -101,6 +101,8 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
        .z10_restore = dcn31_z10_restore,
        .z10_save_init = dcn31_z10_save_init,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+       .optimize_pwr_state = dcn21_optimize_pwr_state,
+       .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
        .update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
 
index 1889629..27afbe6 100644 (file)
@@ -355,6 +355,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = {
        clk_src_regs(3, D),
        clk_src_regs(4, E)
 };
+/* pll_id is remapped in dmub; in the driver it is the logical instance */
+static const struct dce110_clk_src_regs clk_src_regs_b0[] = {
+       clk_src_regs(0, A),
+       clk_src_regs(1, B),
+       clk_src_regs(2, F),
+       clk_src_regs(3, G),
+       clk_src_regs(4, E)
+};
 
 static const struct dce110_clk_src_shift cs_shift = {
                CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
@@ -994,7 +1002,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .timing_trace = false,
        .clock_trace = true,
        .disable_pplib_clock_request = false,
-       .pipe_split_policy = MPC_SPLIT_AVOID,
+       .pipe_split_policy = MPC_SPLIT_DYNAMIC,
        .force_single_disp_pipe_split = false,
        .disable_dcc = DCC_ENABLE,
        .vsr_support = true,
@@ -2276,14 +2284,27 @@ static bool dcn31_resource_construct(
                        dcn30_clock_source_create(ctx, ctx->dc_bios,
                                CLOCK_SOURCE_COMBO_PHY_PLL1,
                                &clk_src_regs[1], false);
-       pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+       /*move phypllx_pixclk_resync to dmub next*/
+       if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+               pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+                       dcn30_clock_source_create(ctx, ctx->dc_bios,
+                               CLOCK_SOURCE_COMBO_PHY_PLL2,
+                               &clk_src_regs_b0[2], false);
+               pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+                       dcn30_clock_source_create(ctx, ctx->dc_bios,
+                               CLOCK_SOURCE_COMBO_PHY_PLL3,
+                               &clk_src_regs_b0[3], false);
+       } else {
+               pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
                        dcn30_clock_source_create(ctx, ctx->dc_bios,
                                CLOCK_SOURCE_COMBO_PHY_PLL2,
                                &clk_src_regs[2], false);
-       pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+               pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
                        dcn30_clock_source_create(ctx, ctx->dc_bios,
                                CLOCK_SOURCE_COMBO_PHY_PLL3,
                                &clk_src_regs[3], false);
+       }
+
        pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
                        dcn30_clock_source_create(ctx, ctx->dc_bios,
                                CLOCK_SOURCE_COMBO_PHY_PLL4,
index 416fe7a..a513363 100644 (file)
@@ -49,4 +49,35 @@ struct resource_pool *dcn31_create_resource_pool(
                const struct dc_init_data *init_data,
                struct dc *dc);
 
+/*temp: B0 specific before switch to dcn313 headers*/
+#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+
+//PHYPLLF_PIXCLK_RESYNC_CNTL
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+
+//PHYPLLG_PIXCLK_RESYNC_CNTL
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+#endif
 #endif /* _DCN31_RESOURCE_H_ */
index 7ec4331..a486769 100644 (file)
@@ -143,6 +143,55 @@ struct gc_info_v1_0 {
        uint32_t gc_num_gl2a;
 };
 
+struct gc_info_v1_1 {
+       struct gpu_info_header header;
+
+       uint32_t gc_num_se;
+       uint32_t gc_num_wgp0_per_sa;
+       uint32_t gc_num_wgp1_per_sa;
+       uint32_t gc_num_rb_per_se;
+       uint32_t gc_num_gl2c;
+       uint32_t gc_num_gprs;
+       uint32_t gc_num_max_gs_thds;
+       uint32_t gc_gs_table_depth;
+       uint32_t gc_gsprim_buff_depth;
+       uint32_t gc_parameter_cache_depth;
+       uint32_t gc_double_offchip_lds_buffer;
+       uint32_t gc_wave_size;
+       uint32_t gc_max_waves_per_simd;
+       uint32_t gc_max_scratch_slots_per_cu;
+       uint32_t gc_lds_size;
+       uint32_t gc_num_sc_per_se;
+       uint32_t gc_num_sa_per_se;
+       uint32_t gc_num_packer_per_sc;
+       uint32_t gc_num_gl2a;
+       uint32_t gc_num_tcp_per_sa;
+       uint32_t gc_num_sdp_interface;
+       uint32_t gc_num_tcps;
+};
+
+struct gc_info_v2_0 {
+       struct gpu_info_header header;
+
+       uint32_t gc_num_se;
+       uint32_t gc_num_cu_per_sh;
+       uint32_t gc_num_sh_per_se;
+       uint32_t gc_num_rb_per_se;
+       uint32_t gc_num_tccs;
+       uint32_t gc_num_gprs;
+       uint32_t gc_num_max_gs_thds;
+       uint32_t gc_gs_table_depth;
+       uint32_t gc_gsprim_buff_depth;
+       uint32_t gc_parameter_cache_depth;
+       uint32_t gc_double_offchip_lds_buffer;
+       uint32_t gc_wave_size;
+       uint32_t gc_max_waves_per_simd;
+       uint32_t gc_max_scratch_slots_per_cu;
+       uint32_t gc_lds_size;
+       uint32_t gc_num_sc_per_se;
+       uint32_t gc_num_packer_per_sc;
+};
+
 typedef struct harvest_info_header {
        uint32_t signature; /* Table Signature */
        uint32_t version;   /* Table Version */
index 8d796ed..619f8d3 100644 (file)
@@ -1328,7 +1328,12 @@ static int pp_set_powergating_by_smu(void *handle,
                pp_dpm_powergate_vce(handle, gate);
                break;
        case AMD_IP_BLOCK_TYPE_GMC:
-               pp_dpm_powergate_mmhub(handle);
+               /*
+                * For now, this is only used on PICASSO.
+                * And only "gate" operation is supported.
+                */
+               if (gate)
+                       pp_dpm_powergate_mmhub(handle);
                break;
        case AMD_IP_BLOCK_TYPE_GFX:
                ret = pp_dpm_powergate_gfx(handle, gate);
index 8a32445..9d7d64f 100644 (file)
@@ -1400,8 +1400,14 @@ static int smu_disable_dpms(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
+       /*
+        * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair
+        * the workaround which always reset the asic in suspend.
+        * It's likely that workaround will be dropped in the future.
+        * Then the change here should be dropped together.
+        */
        bool use_baco = !smu->is_apu &&
-               ((amdgpu_in_reset(adev) &&
+               (((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
                  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
                 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
 
@@ -1568,9 +1574,7 @@ static int smu_suspend(void *handle)
 
        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-       /* skip CGPG when in S0ix */
-       if (smu->is_apu && !adev->in_s0ix)
-               smu_set_gfx_cgpg(&adev->smu, false);
+       smu_set_gfx_cgpg(&adev->smu, false);
 
        return 0;
 }
@@ -1601,8 +1605,7 @@ static int smu_resume(void *handle)
                return ret;
        }
 
-       if (smu->is_apu)
-               smu_set_gfx_cgpg(&adev->smu, true);
+       smu_set_gfx_cgpg(&adev->smu, true);
 
        smu->disable_uclk_switch = 0;
 
index d60b8c5..9c91e79 100644 (file)
@@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
 
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
 {
-       if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+       /* So far SMU12 is implemented only for the Renoir series, so no APU check is needed here. */
+       if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix)
                return 0;
 
        return smu_cmn_send_smc_msg_with_param(smu,
@@ -191,6 +192,9 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
        kfree(smu_table->watermarks_table);
        smu_table->watermarks_table = NULL;
 
+       kfree(smu_table->gpu_metrics_table);
+       smu_table->gpu_metrics_table = NULL;
+
        return 0;
 }
 
index 59a7d27..7d50827 100644 (file)
@@ -1621,7 +1621,7 @@ static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
 {
        return smu_cmn_send_smc_msg_with_param(smu,
                                               SMU_MSG_GmiPwrDnControl,
-                                              en ? 1 : 0,
+                                              en ? 0 : 1,
                                               NULL);
 }
 
index 35145db..19a5d2c 100644 (file)
@@ -198,6 +198,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
 
 int smu_v13_0_check_fw_version(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        uint32_t if_version = 0xff, smu_version = 0xff;
        uint16_t smu_major;
        uint8_t smu_minor, smu_debug;
@@ -210,6 +211,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
        smu_major = (smu_version >> 16) & 0xffff;
        smu_minor = (smu_version >> 8) & 0xff;
        smu_debug = (smu_version >> 0) & 0xff;
+       if (smu->is_apu)
+               adev->pm.fw_version = smu_version;
 
        switch (smu->adev->ip_versions[MP1_HWIP][0]) {
        case IP_VERSION(13, 0, 2):
index 1e30eae..d5c98f7 100644 (file)
@@ -1121,7 +1121,10 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
        if (crtc->state)
                crtc->funcs->atomic_destroy_state(crtc, crtc->state);
 
-       __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+       if (ast_state)
+               __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+       else
+               __drm_atomic_helper_crtc_reset(crtc, NULL);
 }
 
 static struct drm_crtc_state *
index 8e7a124..22bf690 100644 (file)
@@ -1743,7 +1743,13 @@ void drm_fb_helper_fill_info(struct fb_info *info,
                               sizes->fb_width, sizes->fb_height);
 
        info->par = fb_helper;
-       snprintf(info->fix.id, sizeof(info->fix.id), "%s",
+       /*
+        * The DRM drivers fbdev emulation device name can be confusing if the
+        * driver name also has a "drm" suffix on it. Leading to names such as
+        * "simpledrmdrmfb" in /proc/fb. Unfortunately, it's an uAPI and can't
+        * be changed due user-space tools (e.g: pm-utils) matching against it.
+        */
+       snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
                 fb_helper->dev->driver->name);
 
 }
index 7b9f69f..bca0de9 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/module.h>
 
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
index c9a9d74..c313a5b 100644 (file)
@@ -404,8 +404,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
 
        if (*fence) {
                ret = dma_fence_chain_find_seqno(fence, point);
-               if (!ret)
+               if (!ret) {
+                       /* If the requested seqno is already signaled
+                        * drm_syncobj_find_fence may return a NULL
+                        * fence. To make sure the recipient gets
+                        * signalled, use a new fence instead.
+                        */
+                       if (!*fence)
+                               *fence = dma_fence_get_stub();
+
                        goto out;
+               }
                dma_fence_put(*fence);
        } else {
                ret = -EINVAL;
index 2dc9d63..aef6952 100644 (file)
@@ -596,7 +596,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
                        continue;
 
                offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
-               if (fw->size - offset < 0) {
+               if (offset > fw->size) {
                        drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
                        continue;
                }
index fb33d03..c37c9f0 100644 (file)
@@ -564,6 +564,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
                container_of_user(base, typeof(*ext), base);
        const struct set_proto_ctx_engines *set = data;
        struct drm_i915_private *i915 = set->i915;
+       struct i915_engine_class_instance prev_engine;
        u64 flags;
        int err = 0, n, i, j;
        u16 slot, width, num_siblings;
@@ -629,7 +630,6 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
        /* Create contexts / engines */
        for (i = 0; i < width; ++i) {
                intel_engine_mask_t current_mask = 0;
-               struct i915_engine_class_instance prev_engine;
 
                for (j = 0; j < num_siblings; ++j) {
                        struct i915_engine_class_instance ci;
index 4d7da07..cb0bf6f 100644 (file)
@@ -3017,7 +3017,7 @@ eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
        fence_array = dma_fence_array_create(eb->num_batches,
                                             fences,
                                             eb->context->parallel.fence_context,
-                                            eb->context->parallel.seqno,
+                                            eb->context->parallel.seqno++,
                                             false);
        if (!fence_array) {
                kfree(fences);
@@ -3277,6 +3277,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
        if (IS_ERR(out_fence)) {
                err = PTR_ERR(out_fence);
+               out_fence = NULL;
                if (eb.requests[0])
                        goto err_request;
                else
index 67d14af..b67f620 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/slab.h> /* fault-inject.h is not standalone! */
 
 #include <linux/fault-inject.h>
+#include <linux/sched/mm.h>
 
 #include "gem/i915_gem_lmem.h"
 #include "i915_trace.h"
index ed73d9b..2400d64 100644 (file)
@@ -1127,6 +1127,15 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
                    GAMT_CHKN_BIT_REG,
                    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 
+       /* Wa_1407352427:icl,ehl */
+       wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+                   PSDUNIT_CLKGATE_DIS);
+
+       /* Wa_1406680159:icl,ehl */
+       wa_write_or(wal,
+                   SUBSLICE_UNIT_LEVEL_CLKGATE,
+                   GWUNIT_CLKGATE_DIS);
+
        /* Wa_1607087056:icl,ehl,jsl */
        if (IS_ICELAKE(i915) ||
            IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
@@ -1852,15 +1861,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
                            VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
 
-               /* Wa_1407352427:icl,ehl */
-               wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
-                           PSDUNIT_CLKGATE_DIS);
-
-               /* Wa_1406680159:icl,ehl */
-               wa_write_or(wal,
-                           SUBSLICE_UNIT_LEVEL_CLKGATE,
-                           GWUNIT_CLKGATE_DIS);
-
                /*
                 * Wa_1408767742:icl[a2..forever],ehl[all]
                 * Wa_1605460711:icl[a0..c0]
index c48557d..302e9ff 100644 (file)
@@ -1662,11 +1662,11 @@ static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
                GEM_BUG_ON(intel_context_is_parent(cn));
 
                list_del_init(&cn->guc_id.link);
-               ce->guc_id = cn->guc_id;
+               ce->guc_id.id = cn->guc_id.id;
 
-               spin_lock(&ce->guc_state.lock);
+               spin_lock(&cn->guc_state.lock);
                clr_context_registered(cn);
-               spin_unlock(&ce->guc_state.lock);
+               spin_unlock(&cn->guc_state.lock);
 
                set_context_guc_id_invalid(cn);
 
index 820a1f3..89cccef 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
 
 #include "gem/i915_gem_context.h"
 #include "gt/intel_breadcrumbs.h"
index 65fdca3..f74f804 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 #include <linux/clk.h>
+#include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 
index 5838c44..3196189 100644 (file)
@@ -1224,12 +1224,14 @@ static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
                        return MODE_BAD;
        }
 
-       if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
-               return MODE_BAD;
+       if (hdmi->conf) {
+               if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
+                       return MODE_BAD;
 
-       if (hdmi->conf->max_mode_clock &&
-           mode->clock > hdmi->conf->max_mode_clock)
-               return MODE_CLOCK_HIGH;
+               if (hdmi->conf->max_mode_clock &&
+                   mode->clock > hdmi->conf->max_mode_clock)
+                       return MODE_CLOCK_HIGH;
+       }
 
        if (mode->clock < 27000)
                return MODE_CLOCK_LOW;
index 4a1420b..086dacf 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
index 05d0b3e..0ae416a 100644 (file)
@@ -353,15 +353,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
                if (ret)
                        return ret;
-       }
 
-       fobj = dma_resv_shared_list(resv);
-       fence = dma_resv_excl_fence(resv);
+               fobj = NULL;
+       } else {
+               fobj = dma_resv_shared_list(resv);
+       }
 
-       if (fence) {
+       /* Waiting for the exclusive fence first causes performance regressions
+        * under some circumstances. So manually wait for the shared ones first.
+        */
+       for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
                struct nouveau_channel *prev = NULL;
                bool must_wait = true;
 
+               fence = rcu_dereference_protected(fobj->shared[i],
+                                               dma_resv_held(resv));
+
                f = nouveau_local_fence(fence, chan->drm);
                if (f) {
                        rcu_read_lock();
@@ -373,20 +380,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
                if (must_wait)
                        ret = dma_fence_wait(fence, intr);
-
-               return ret;
        }
 
-       if (!exclusive || !fobj)
-               return ret;
-
-       for (i = 0; i < fobj->shared_count && !ret; ++i) {
+       fence = dma_resv_excl_fence(resv);
+       if (fence) {
                struct nouveau_channel *prev = NULL;
                bool must_wait = true;
 
-               fence = rcu_dereference_protected(fobj->shared[i],
-                                               dma_resv_held(resv));
-
                f = nouveau_local_fence(fence, chan->drm);
                if (f) {
                        rcu_read_lock();
@@ -398,6 +398,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
                if (must_wait)
                        ret = dma_fence_wait(fence, intr);
+
+               return ret;
        }
 
        return ret;
index 481b48b..5a6e898 100644 (file)
@@ -458,7 +458,7 @@ static struct drm_display_mode simpledrm_mode(unsigned int width,
 {
        struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) };
 
-       mode.clock = 60 /* Hz */ * mode.hdisplay * mode.vdisplay;
+       mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000 /* kHz */;
        drm_mode_set_name(&mode);
 
        return mode;
index 739f11c..047adc4 100644 (file)
@@ -1103,7 +1103,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
         * as an indication that we're about to swap out.
         */
        memset(&place, 0, sizeof(place));
-       place.mem_type = TTM_PL_SYSTEM;
+       place.mem_type = bo->resource->mem_type;
        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
                return -EBUSY;
 
@@ -1135,6 +1135,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                struct ttm_place hop;
 
                memset(&hop, 0, sizeof(hop));
+               place.mem_type = TTM_PL_SYSTEM;
                ret = ttm_resource_alloc(bo, &place, &evict_mem);
                if (unlikely(ret))
                        goto out;
index 7e83c00..79c870a 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/sched.h>
 #include <linux/shmem_fs.h>
 #include <linux/file.h>
+#include <linux/module.h>
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
index 9f5435b..a7c78ac 100644 (file)
@@ -207,14 +207,14 @@ config HID_CHERRY
 
 config HID_CHICONY
        tristate "Chicony devices"
-       depends on HID
+       depends on USB_HID
        default !EXPERT
        help
        Support for Chicony Tactical pad and special keys on Chicony keyboards.
 
 config HID_CORSAIR
        tristate "Corsair devices"
-       depends on HID && USB && LEDS_CLASS
+       depends on USB_HID && LEDS_CLASS
        help
        Support for Corsair devices that are not fully compliant with the
        HID standard.
@@ -245,7 +245,7 @@ config HID_MACALLY
 
 config HID_PRODIKEYS
        tristate "Prodikeys PC-MIDI Keyboard support"
-       depends on HID && SND
+       depends on USB_HID && SND
        select SND_RAWMIDI
        help
        Support for Prodikeys PC-MIDI Keyboard device support.
@@ -560,7 +560,7 @@ config HID_LENOVO
 
 config HID_LOGITECH
        tristate "Logitech devices"
-       depends on HID
+       depends on USB_HID
        depends on LEDS_CLASS
        default !EXPERT
        help
@@ -951,7 +951,7 @@ config HID_SAITEK
 
 config HID_SAMSUNG
        tristate "Samsung InfraRed remote control or keyboards"
-       depends on HID
+       depends on USB_HID
        help
        Support for Samsung InfraRed remote control or keyboards.
 
index f3ecddc..08c9a9a 100644 (file)
@@ -1028,8 +1028,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
                drvdata->tp = &asus_i2c_tp;
 
-       if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
-           hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
                struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
 
                if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
@@ -1057,8 +1056,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                drvdata->tp = &asus_t100chi_tp;
        }
 
-       if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
-           hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) {
                struct usb_host_interface *alt =
                        to_usb_interface(hdev->dev.parent)->altsetting;
 
index db6da21..74ad8bf 100644 (file)
@@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work)
                struct bigben_device, worker);
        struct hid_field *report_field = bigben->report->field[0];
 
-       if (bigben->removed)
+       if (bigben->removed || !report_field)
                return;
 
        if (bigben->work_led) {
index ca556d3..f04d2aa 100644 (file)
@@ -114,6 +114,9 @@ static int ch_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int ret;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
        ret = hid_parse(hdev);
        if (ret) {
index 902a60e..8c895c8 100644 (file)
@@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
        int ret;
        unsigned long quirks = id->driver_data;
        struct corsair_drvdata *drvdata;
-       struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
+       struct usb_interface *usbif;
+
+       if (!hid_is_usb(dev))
+               return -EINVAL;
+
+       usbif = to_usb_interface(dev->dev.parent);
 
        drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
                               GFP_KERNEL);
index 0210498..3091355 100644 (file)
@@ -50,7 +50,7 @@ struct elan_drvdata {
 
 static int is_not_elan_touchpad(struct hid_device *hdev)
 {
-       if (hdev->bus == BUS_USB) {
+       if (hid_is_usb(hdev)) {
                struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
 
                return (intf->altsetting->desc.bInterfaceNumber !=
index 383dfda..8e960d7 100644 (file)
@@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
        int ret;
        struct usb_device *udev;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
index 8ee77f4..79505c6 100644 (file)
@@ -915,6 +915,9 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
        struct ft260_get_chip_version_report version;
        int ret;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
index 8123b87..0403beb 100644 (file)
@@ -585,6 +585,8 @@ static void hammer_remove(struct hid_device *hdev)
 static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
index 0a38e8e..403506b 100644 (file)
@@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
 static int holtek_kbd_probe(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       int ret = hid_parse(hdev);
+       struct usb_interface *intf;
+       int ret;
+
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
 
+       ret = hid_parse(hdev);
        if (!ret)
                ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 
+       intf = to_usb_interface(hdev->dev.parent);
        if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
                struct hid_input *hidinput;
                list_for_each_entry(hidinput, &hdev->inputs, list) {
index 195b735..7c90793 100644 (file)
@@ -62,6 +62,29 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        return rdesc;
 }
 
+static int holtek_mouse_probe(struct hid_device *hdev,
+                             const struct hid_device_id *id)
+{
+       int ret;
+
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
+       ret = hid_parse(hdev);
+       if (ret) {
+               hid_err(hdev, "hid parse failed: %d\n", ret);
+               return ret;
+       }
+
+       ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+       if (ret) {
+               hid_err(hdev, "hw start failed: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 static const struct hid_device_id holtek_mouse_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
                        USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
@@ -83,6 +106,7 @@ static struct hid_driver holtek_mouse_driver = {
        .name = "holtek_mouse",
        .id_table = holtek_mouse_devices,
        .report_fixup = holtek_mouse_report_fixup,
+       .probe = holtek_mouse_probe,
 };
 
 module_hid_driver(holtek_mouse_driver);
index 96a4559..19da077 100644 (file)
 #define USB_DEVICE_ID_HP_X2_10_COVER   0x0755
 #define I2C_DEVICE_ID_HP_ENVY_X360_15  0x2d05
 #define I2C_DEVICE_ID_HP_SPECTRE_X360_15       0x2817
+#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN   0x2706
 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN   0x261A
 
 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
 #define USB_DEVICE_ID_GOOGLE_MOONBALL  0x5044
 #define USB_DEVICE_ID_GOOGLE_DON       0x5050
+#define USB_DEVICE_ID_GOOGLE_EEL       0x5057
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
 #define USB_DEVICE_ID_MS_TOUCH_COVER_2   0x07a7
 #define USB_DEVICE_ID_MS_TYPE_COVER_2    0x07a9
 #define USB_DEVICE_ID_MS_POWER_COVER     0x07da
+#define USB_DEVICE_ID_MS_SURFACE3_COVER                0x07de
 #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
 #define USB_DEVICE_ID_MS_PIXART_MOUSE    0x00cb
 #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS      0x02e0
index 217f2d1..03f9945 100644 (file)
@@ -325,6 +325,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
          HID_BATTERY_QUIRK_IGNORE },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
          HID_BATTERY_QUIRK_IGNORE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+         HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
          HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
index d40af91..fb3f725 100644 (file)
@@ -749,12 +749,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
 
 static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
-       struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
-       __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
+       struct usb_interface *iface;
+       __u8 iface_num;
        unsigned int connect_mask = HID_CONNECT_DEFAULT;
        struct lg_drv_data *drv_data;
        int ret;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
+       iface = to_usb_interface(hdev->dev.parent);
+       iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
+
        /* G29 only work with the 1st interface */
        if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
            (iface_num != 0)) {
index a0017b0..7106b92 100644 (file)
@@ -1777,7 +1777,7 @@ static int logi_dj_probe(struct hid_device *hdev,
        case recvr_type_bluetooth:      no_dj_interfaces = 2; break;
        case recvr_type_dinovo:         no_dj_interfaces = 2; break;
        }
-       if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if (hid_is_usb(hdev)) {
                intf = to_usb_interface(hdev->dev.parent);
                if (intf && intf->altsetting->desc.bInterfaceNumber >=
                                                        no_dj_interfaces) {
index 2666af0..e4e9471 100644 (file)
@@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
 static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int ret;
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+       struct usb_interface *intf;
+       unsigned short ifnum;
        unsigned long quirks = id->driver_data;
        struct pk_device *pk;
        struct pcmidi_snd *pm = NULL;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
+       intf = to_usb_interface(hdev->dev.parent);
+       ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
        pk = kzalloc(sizeof(*pk), GFP_KERNEL);
        if (pk == NULL) {
                hid_err(hdev, "can't alloc descriptor\n");
index 06b7908..ee7e504 100644 (file)
@@ -124,6 +124,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
index 4556d2a..d94ee05 100644 (file)
@@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index ce5f225..e95d59c 100644 (file)
@@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index ea17abc..76da048 100644 (file)
@@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 0316edf..1896c69 100644 (file)
@@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 5248b3c..cf8eeb3 100644 (file)
@@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 9600128..6fb9b95 100644 (file)
@@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 4a88a76..d5ddf0d 100644 (file)
@@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 989927d..4fcc8e7 100644 (file)
@@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 3956a6c..5bf1971 100644 (file)
@@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 818701f..a784bb4 100644 (file)
@@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 2e1c311..cf5992e 100644 (file)
@@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev,
        int ret;
        unsigned int cmask = HID_CONNECT_DEFAULT;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "parse failed\n");
index d1b107d..60ec2b2 100644 (file)
@@ -3000,7 +3000,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
        sc->quirks = quirks;
        hid_set_drvdata(hdev, sc);
        sc->hdev = hdev;
-       usbdev = to_usb_device(sc->hdev->dev.parent->parent);
 
        ret = hid_parse(hdev);
        if (ret) {
@@ -3038,14 +3037,23 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
         */
        if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
                hid_err(hdev, "failed to claim input\n");
-               hid_hw_stop(hdev);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err;
        }
 
        if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) {
+               if (!hid_is_usb(hdev)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               usbdev = to_usb_device(sc->hdev->dev.parent->parent);
+
                sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
-               if (!sc->ghl_urb)
-                       return -ENOMEM;
+               if (!sc->ghl_urb) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
 
                if (sc->quirks & GHL_GUITAR_PS3WIIU)
                        ret = ghl_init_urb(sc, usbdev, ghl_ps3wiiu_magic_data,
@@ -3055,7 +3063,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
                                                           ARRAY_SIZE(ghl_ps4_magic_data));
                if (ret) {
                        hid_err(hdev, "error preparing URB\n");
-                       return ret;
+                       goto err;
                }
 
                timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
@@ -3064,6 +3072,10 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
        }
 
        return ret;
+
+err:
+       hid_hw_stop(hdev);
+       return ret;
 }
 
 static void sony_remove(struct hid_device *hdev)
index 3a53334..03b935f 100644 (file)
@@ -274,6 +274,9 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
        int ret = 0;
        struct tm_wheel *tm_wheel = NULL;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "parse failed with error %d\n", ret);
index 31ea7fc..ad489ca 100644 (file)
@@ -311,7 +311,7 @@ static int u2fzero_probe(struct hid_device *hdev,
        unsigned int minor;
        int ret;
 
-       if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+       if (!hid_is_usb(hdev))
                return -EINVAL;
 
        dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
index 6a9865d..d8ab013 100644 (file)
@@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev,
        struct uclogic_drvdata *drvdata = NULL;
        bool params_initialized = false;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        /*
         * libinput requires the pad interface to be on a different node
         * than the pen, so use QUIRK_MULTI_INPUT for all tablets.
index 3d67b74..adff1bd 100644 (file)
@@ -843,8 +843,7 @@ int uclogic_params_init(struct uclogic_params *params,
        struct uclogic_params p = {0, };
 
        /* Check arguments */
-       if (params == NULL || hdev == NULL ||
-           !hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) {
                rc = -EINVAL;
                goto cleanup;
        }
index cd7ada4..72957a9 100644 (file)
@@ -57,6 +57,9 @@ static int vivaldi_probe(struct hid_device *hdev,
        int ret;
 
        drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
        hid_set_drvdata(hdev, drvdata);
 
        ret = hid_parse(hdev);
index 1c50390..8e9d945 100644 (file)
@@ -266,7 +266,8 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
 
        if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
                        && IPC_IS_ISH_ILUP(fwsts)) {
-               disable_irq_wake(pdev->irq);
+               if (device_may_wakeup(&pdev->dev))
+                       disable_irq_wake(pdev->irq);
 
                ish_set_host_ready(dev);
 
@@ -337,7 +338,8 @@ static int __maybe_unused ish_suspend(struct device *device)
                         */
                        pci_save_state(pdev);
 
-                       enable_irq_wake(pdev->irq);
+                       if (device_may_wakeup(&pdev->dev))
+                               enable_irq_wake(pdev->irq);
                }
        } else {
                /*
index 2717d39..066c567 100644 (file)
@@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
         * Skip the query for this type and modify defaults based on
         * interface number.
         */
-       if (features->type == WIRELESS) {
+       if (features->type == WIRELESS && intf) {
                if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
                        features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
                else
@@ -2214,7 +2214,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
        if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
                char *product_name = wacom->hdev->name;
 
-               if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
+               if (hid_is_usb(wacom->hdev)) {
                        struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
                        struct usb_device *dev = interface_to_usbdev(intf);
                        product_name = dev->product;
@@ -2451,6 +2451,9 @@ static void wacom_wireless_work(struct work_struct *work)
 
        wacom_destroy_battery(wacom);
 
+       if (!usbdev)
+               return;
+
        /* Stylus interface */
        hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
        wacom1 = hid_get_drvdata(hdev1);
@@ -2730,8 +2733,6 @@ static void wacom_mode_change_work(struct work_struct *work)
 static int wacom_probe(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       struct usb_device *dev = interface_to_usbdev(intf);
        struct wacom *wacom;
        struct wacom_wac *wacom_wac;
        struct wacom_features *features;
@@ -2766,8 +2767,14 @@ static int wacom_probe(struct hid_device *hdev,
        wacom_wac->hid_data.inputmode = -1;
        wacom_wac->mode_report = -1;
 
-       wacom->usbdev = dev;
-       wacom->intf = intf;
+       if (hid_is_usb(hdev)) {
+               struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+               struct usb_device *dev = interface_to_usbdev(intf);
+
+               wacom->usbdev = dev;
+               wacom->intf = intf;
+       }
+
        mutex_init(&wacom->lock);
        INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
        INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
index dd12af2..0747a8f 100644 (file)
@@ -19,6 +19,7 @@ config HYPERV_TIMER
 config HYPERV_UTILS
        tristate "Microsoft Hyper-V Utilities driver"
        depends on HYPERV && CONNECTOR && NLS
+       depends on PTP_1588_CLOCK_OPTIONAL
        help
          Select this option to enable the Hyper-V Utilities.
 
index 731d511..14389fd 100644 (file)
@@ -729,7 +729,7 @@ static int corsairpsu_probe(struct hid_device *hdev, const struct hid_device_id
        corsairpsu_check_cmd_support(priv);
 
        priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsairpsu", priv,
-                                                         &corsairpsu_chip_info, 0);
+                                                         &corsairpsu_chip_info, NULL);
 
        if (IS_ERR(priv->hwmon_dev)) {
                ret = PTR_ERR(priv->hwmon_dev);
index eaace47..5596c21 100644 (file)
@@ -627,10 +627,9 @@ static void __init i8k_init_procfs(struct device *dev)
 {
        struct dell_smm_data *data = dev_get_drvdata(dev);
 
-       /* Register the proc entry */
-       proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data);
-
-       devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
+       /* Only register exit function if creation was successful */
+       if (proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data))
+               devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
 }
 
 #else
index 618052c..74019df 100644 (file)
  * explicitly as max6659, or if its address is not 0x4c.
  * These chips lack the remote temperature offset feature.
  *
- * This driver also supports the MAX6654 chip made by Maxim. This chip can
- * be at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is
- * otherwise similar to MAX6657/MAX6658/MAX6659. Extended range is available
- * by setting the configuration register accordingly, and is done during
- * initialization. Extended precision is only available at conversion rates
- * of 1 Hz and slower. Note that extended precision is not enabled by
- * default, as this driver initializes all chips to 2 Hz by design.
+ * This driver also supports the MAX6654 chip made by Maxim. This chip can be
+ * at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is similar
+ * to MAX6657/MAX6658/MAX6659, but does not support critical temperature
+ * limits. Extended range is available by setting the configuration register
+ * accordingly, and is done during initialization. Extended precision is only
+ * available at conversion rates of 1 Hz and slower. Note that extended
+ * precision is not enabled by default, as this driver initializes all chips
+ * to 2 Hz by design.
  *
  * This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
  * MAX6692 chips made by Maxim.  These are again similar to the LM86,
@@ -188,6 +189,8 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
 #define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert                */
 #define LM90_HAVE_EXTENDED_TEMP        (1 << 8) /* extended temperature support*/
 #define LM90_PAUSE_FOR_CONFIG  (1 << 9) /* Pause conversion for config */
+#define LM90_HAVE_CRIT         (1 << 10)/* Chip supports CRIT/OVERT register   */
+#define LM90_HAVE_CRIT_ALRM_SWP        (1 << 11)/* critical alarm bits swapped */
 
 /* LM90 status */
 #define LM90_STATUS_LTHRM      (1 << 0) /* local THERM limit tripped */
@@ -197,6 +200,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
 #define LM90_STATUS_RHIGH      (1 << 4) /* remote high temp limit tripped */
 #define LM90_STATUS_LLOW       (1 << 5) /* local low temp limit tripped */
 #define LM90_STATUS_LHIGH      (1 << 6) /* local high temp limit tripped */
+#define LM90_STATUS_BUSY       (1 << 7) /* conversion is ongoing */
 
 #define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */
 #define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */
@@ -354,38 +358,43 @@ struct lm90_params {
 static const struct lm90_params lm90_params[] = {
        [adm1032] = {
                .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
-                 | LM90_HAVE_BROKEN_ALERT,
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 10,
        },
        [adt7461] = {
                .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
-                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP
+                 | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 10,
        },
        [g781] = {
                .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
-                 | LM90_HAVE_BROKEN_ALERT,
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 8,
        },
        [lm86] = {
-               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+                 | LM90_HAVE_CRIT,
                .alert_alarms = 0x7b,
                .max_convrate = 9,
        },
        [lm90] = {
-               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+                 | LM90_HAVE_CRIT,
                .alert_alarms = 0x7b,
                .max_convrate = 9,
        },
        [lm99] = {
-               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+                 | LM90_HAVE_CRIT,
                .alert_alarms = 0x7b,
                .max_convrate = 9,
        },
        [max6646] = {
+               .flags = LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 6,
                .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@@ -396,50 +405,51 @@ static const struct lm90_params lm90_params[] = {
                .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
        },
        [max6657] = {
-               .flags = LM90_PAUSE_FOR_CONFIG,
+               .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 8,
                .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
        },
        [max6659] = {
-               .flags = LM90_HAVE_EMERGENCY,
+               .flags = LM90_HAVE_EMERGENCY | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 8,
                .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
        },
        [max6680] = {
-               .flags = LM90_HAVE_OFFSET,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
+                 | LM90_HAVE_CRIT_ALRM_SWP,
                .alert_alarms = 0x7c,
                .max_convrate = 7,
        },
        [max6696] = {
                .flags = LM90_HAVE_EMERGENCY
-                 | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
+                 | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT,
                .alert_alarms = 0x1c7c,
                .max_convrate = 6,
                .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
        },
        [w83l771] = {
-               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 8,
        },
        [sa56004] = {
-               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
                .alert_alarms = 0x7b,
                .max_convrate = 9,
                .reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
        },
        [tmp451] = {
                .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
-                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 9,
                .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
        },
        [tmp461] = {
                .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
-                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 9,
                .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
@@ -668,20 +678,22 @@ static int lm90_update_limits(struct device *dev)
        struct i2c_client *client = data->client;
        int val;
 
-       val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
-       if (val < 0)
-               return val;
-       data->temp8[LOCAL_CRIT] = val;
+       if (data->flags & LM90_HAVE_CRIT) {
+               val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
+               if (val < 0)
+                       return val;
+               data->temp8[LOCAL_CRIT] = val;
 
-       val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
-       if (val < 0)
-               return val;
-       data->temp8[REMOTE_CRIT] = val;
+               val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+               if (val < 0)
+                       return val;
+               data->temp8[REMOTE_CRIT] = val;
 
-       val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
-       if (val < 0)
-               return val;
-       data->temp_hyst = val;
+               val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
+               if (val < 0)
+                       return val;
+               data->temp_hyst = val;
+       }
 
        val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
        if (val < 0)
@@ -809,7 +821,7 @@ static int lm90_update_device(struct device *dev)
                val = lm90_read_reg(client, LM90_REG_R_STATUS);
                if (val < 0)
                        return val;
-               data->alarms = val;     /* lower 8 bit of alarms */
+               data->alarms = val & ~LM90_STATUS_BUSY;
 
                if (data->kind == max6696) {
                        val = lm90_select_remote_channel(data, 1);
@@ -1160,8 +1172,8 @@ static int lm90_set_temphyst(struct lm90_data *data, long val)
        else
                temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
 
-       /* prevent integer underflow */
-       val = max(val, -128000l);
+       /* prevent integer overflow/underflow */
+       val = clamp_val(val, -128000l, 255000l);
 
        data->temp_hyst = hyst_to_reg(temp - val);
        err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
@@ -1192,6 +1204,7 @@ static const u8 lm90_temp_emerg_index[3] = {
 static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
 static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
 static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
+static const u8 lm90_crit_alarm_bits_swapped[3] = { 1, 0, 9 };
 static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
 static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
 
@@ -1217,7 +1230,10 @@ static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val)
                *val = (data->alarms >> lm90_max_alarm_bits[channel]) & 1;
                break;
        case hwmon_temp_crit_alarm:
-               *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
+               if (data->flags & LM90_HAVE_CRIT_ALRM_SWP)
+                       *val = (data->alarms >> lm90_crit_alarm_bits_swapped[channel]) & 1;
+               else
+                       *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
                break;
        case hwmon_temp_emergency_alarm:
                *val = (data->alarms >> lm90_emergency_alarm_bits[channel]) & 1;
@@ -1465,12 +1481,11 @@ static int lm90_detect(struct i2c_client *client,
        if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0)
                return -ENODEV;
 
-       if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
+       if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) {
                config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2);
                if (config2 < 0)
                        return -ENODEV;
-       } else
-               config2 = 0;            /* Make compiler happy */
+       }
 
        if ((address == 0x4C || address == 0x4D)
         && man_id == 0x01) { /* National Semiconductor */
@@ -1903,11 +1918,14 @@ static int lm90_probe(struct i2c_client *client)
        info->config = data->channel_config;
 
        data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
-               HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
-               HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM;
+               HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM;
        data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
-               HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
-               HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT;
+               HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_FAULT;
+
+       if (data->flags & LM90_HAVE_CRIT) {
+               data->channel_config[0] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
+               data->channel_config[1] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
+       }
 
        if (data->flags & LM90_HAVE_OFFSET)
                data->channel_config[1] |= HWMON_T_OFFSET;
index 93dca47..57ce863 100644 (file)
@@ -1527,7 +1527,7 @@ static u16 nct6775_wmi_read_value(struct nct6775_data *data, u16 reg)
 
        nct6775_wmi_set_bank(data, reg);
 
-       err = nct6775_asuswmi_read(data->bank, reg, &tmp);
+       err = nct6775_asuswmi_read(data->bank, reg & 0xff, &tmp);
        if (err)
                return 0;
 
index 17518b4..f12b9a2 100644 (file)
@@ -336,8 +336,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
                        return ret;
        }
 
-       ctx->pwm_value = MAX_PWM;
-
        pwm_init_state(ctx->pwm, &ctx->pwm_state);
 
        /*
index 09c2a0b..3415d7a 100644 (file)
@@ -23,7 +23,7 @@
 /*
  * I2C command delays (in microseconds)
  */
-#define SHT4X_MEAS_DELAY       1000
+#define SHT4X_MEAS_DELAY_HPM   8200    /* see t_MEAS,h in datasheet */
 #define SHT4X_DELAY_EXTRA      10000
 
 /*
@@ -90,7 +90,7 @@ static int sht4x_read_values(struct sht4x_data *data)
        if (ret < 0)
                goto unlock;
 
-       usleep_range(SHT4X_MEAS_DELAY, SHT4X_MEAS_DELAY + SHT4X_DELAY_EXTRA);
+       usleep_range(SHT4X_MEAS_DELAY_HPM, SHT4X_MEAS_DELAY_HPM + SHT4X_DELAY_EXTRA);
 
        ret = i2c_master_recv(client, raw_data, SHT4X_RESPONSE_LENGTH);
        if (ret != SHT4X_RESPONSE_LENGTH) {
index a6ea1eb..53b8da6 100644 (file)
@@ -636,7 +636,7 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
        status = readb(i2c->base + MPC_I2C_SR);
        if (status & CSR_MIF) {
                /* Wait up to 100us for transfer to properly complete */
-               readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
+               readb_poll_timeout_atomic(i2c->base + MPC_I2C_SR, status, status & CSR_MCF, 0, 100);
                writeb(0, i2c->base + MPC_I2C_SR);
                mpc_i2c_do_intr(i2c, status);
                return IRQ_HANDLED;
index 9537878..41eb0dc 100644 (file)
 /**
  * struct virtio_i2c - virtio I2C data
  * @vdev: virtio device for this controller
- * @completion: completion of virtio I2C message
  * @adap: I2C adapter for this controller
  * @vq: the virtio virtqueue for communication
  */
 struct virtio_i2c {
        struct virtio_device *vdev;
-       struct completion completion;
        struct i2c_adapter adap;
        struct virtqueue *vq;
 };
 
 /**
  * struct virtio_i2c_req - the virtio I2C request structure
+ * @completion: completion of virtio I2C message
  * @out_hdr: the OUT header of the virtio I2C message
  * @buf: the buffer into which data is read, or from which it's written
  * @in_hdr: the IN header of the virtio I2C message
  */
 struct virtio_i2c_req {
+       struct completion completion;
        struct virtio_i2c_out_hdr out_hdr       ____cacheline_aligned;
        uint8_t *buf                            ____cacheline_aligned;
        struct virtio_i2c_in_hdr in_hdr         ____cacheline_aligned;
@@ -47,9 +47,11 @@ struct virtio_i2c_req {
 
 static void virtio_i2c_msg_done(struct virtqueue *vq)
 {
-       struct virtio_i2c *vi = vq->vdev->priv;
+       struct virtio_i2c_req *req;
+       unsigned int len;
 
-       complete(&vi->completion);
+       while ((req = virtqueue_get_buf(vq, &len)))
+               complete(&req->completion);
 }
 
 static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
@@ -62,6 +64,8 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
        for (i = 0; i < num; i++) {
                int outcnt = 0, incnt = 0;
 
+               init_completion(&reqs[i].completion);
+
                /*
                 * Only 7-bit mode supported for this moment. For the address
                 * format, Please check the Virtio I2C Specification.
@@ -106,21 +110,15 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
                                    struct virtio_i2c_req *reqs,
                                    struct i2c_msg *msgs, int num)
 {
-       struct virtio_i2c_req *req;
        bool failed = false;
-       unsigned int len;
        int i, j = 0;
 
        for (i = 0; i < num; i++) {
-               /* Detach the ith request from the vq */
-               req = virtqueue_get_buf(vq, &len);
+               struct virtio_i2c_req *req = &reqs[i];
 
-               /*
-                * Condition req == &reqs[i] should always meet since we have
-                * total num requests in the vq. reqs[i] can never be NULL here.
-                */
-               if (!failed && (WARN_ON(req != &reqs[i]) ||
-                               req->in_hdr.status != VIRTIO_I2C_MSG_OK))
+               wait_for_completion(&req->completion);
+
+               if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK)
                        failed = true;
 
                i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
@@ -156,12 +154,8 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
         * remote here to clear the virtqueue, so we can try another set of
         * messages later on.
         */
-
-       reinit_completion(&vi->completion);
        virtqueue_kick(vq);
 
-       wait_for_completion(&vi->completion);
-
        count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);
 
 err_free:
@@ -210,8 +204,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
        vdev->priv = vi;
        vi->vdev = vdev;
 
-       init_completion(&vi->completion);
-
        ret = virtio_i2c_setup_vqs(vi);
        if (ret)
                return ret;
index bce0e8b..cf5d049 100644 (file)
@@ -535,6 +535,9 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
                                   sizeof(rdwr_arg)))
                        return -EFAULT;
 
+               if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
+                       return -EINVAL;
+
                if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
                        return -EINVAL;
 
index a51fdd3..24c9387 100644 (file)
@@ -1595,8 +1595,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
        return 0;
 
 err_buffer_cleanup:
-       if (data->dready_trig)
-               iio_triggered_buffer_cleanup(indio_dev);
+       iio_triggered_buffer_cleanup(indio_dev);
 err_trigger_unregister:
        if (data->dready_trig)
                iio_trigger_unregister(data->dready_trig);
@@ -1618,8 +1617,8 @@ static int kxcjk1013_remove(struct i2c_client *client)
        pm_runtime_disable(&client->dev);
        pm_runtime_set_suspended(&client->dev);
 
+       iio_triggered_buffer_cleanup(indio_dev);
        if (data->dready_trig) {
-               iio_triggered_buffer_cleanup(indio_dev);
                iio_trigger_unregister(data->dready_trig);
                iio_trigger_unregister(data->motion_trig);
        }
index 2faf85c..552eba5 100644 (file)
@@ -224,14 +224,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
                               hw_values.chan,
                               sizeof(hw_values.chan));
        if (ret) {
-               dev_err(st->dev,
-                       "error reading data\n");
-               return ret;
+               dev_err(st->dev, "error reading data: %d\n", ret);
+               goto out;
        }
 
        iio_push_to_buffers_with_timestamp(indio_dev,
                                           &hw_values,
                                           iio_get_time_ns(indio_dev));
+out:
        iio_trigger_notify_done(indio_dev->trig);
 
        return IRQ_HANDLED;
index 715b813..09c7f10 100644 (file)
@@ -1470,7 +1470,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
        if (ret)
                return ret;
 
-       indio_dev->trig = trig;
+       indio_dev->trig = iio_trigger_get(trig);
 
        return 0;
 }
index 8bf5b62..3363af1 100644 (file)
@@ -532,7 +532,7 @@ config IMX7D_ADC
 
 config IMX8QXP_ADC
        tristate "NXP IMX8QXP ADC driver"
-       depends on ARCH_MXC_ARM64 || COMPILE_TEST
+       depends on ARCH_MXC || COMPILE_TEST
        depends on HAS_IOMEM
        help
          Say yes here to build support for IMX8QXP ADC.
index 2c5c8a3..aa42ba7 100644 (file)
@@ -480,8 +480,8 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
        iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
                                           iio_get_time_ns(indio_dev));
 
-       iio_trigger_notify_done(indio_dev->trig);
 err_unlock:
+       iio_trigger_notify_done(indio_dev->trig);
        mutex_unlock(&st->lock);
 
        return IRQ_HANDLED;
index 4c922ef..92a57cf 100644 (file)
@@ -1586,7 +1586,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
                *val = st->conversion_value;
                ret = at91_adc_adjust_val_osr(st, val);
                if (chan->scan_type.sign == 's')
-                       *val = sign_extend32(*val, 11);
+                       *val = sign_extend32(*val,
+                                            chan->scan_type.realbits - 1);
                st->conversion_done = false;
        }
 
index 3e0c023..df99f13 100644 (file)
@@ -251,19 +251,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
                          struct iio_chan_spec const *chan, int *val)
 {
        struct axp20x_adc_iio *info = iio_priv(indio_dev);
-       int size;
 
-       /*
-        * N.B.: Unlike the Chinese datasheets tell, the charging current is
-        * stored on 12 bits, not 13 bits. Only discharging current is on 13
-        * bits.
-        */
-       if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I)
-               size = 13;
-       else
-               size = 12;
-
-       *val = axp20x_read_variable_width(info->regmap, chan->address, size);
+       *val = axp20x_read_variable_width(info->regmap, chan->address, 12);
        if (*val < 0)
                return *val;
 
@@ -386,9 +375,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
                return IIO_VAL_INT_PLUS_MICRO;
 
        case IIO_CURRENT:
-               *val = 0;
-               *val2 = 500000;
-               return IIO_VAL_INT_PLUS_MICRO;
+               *val = 1;
+               return IIO_VAL_INT;
 
        case IIO_TEMP:
                *val = 100;
index 1640766..97d162a 100644 (file)
@@ -248,7 +248,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
 static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
 {
        int ret, i;
-       struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
        u16 conflict;
        __le16 value;
        int olen = sizeof(value);
@@ -257,13 +256,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
                .chan = channel,
        };
 
-       ret = iio_device_claim_direct_mode(indio_dev);
-       if (ret < 0)
-               return ret;
-
        ret = dln2_adc_set_chan_enabled(dln2, channel, true);
        if (ret < 0)
-               goto release_direct;
+               return ret;
 
        ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
        if (ret < 0) {
@@ -300,8 +295,6 @@ disable_port:
        dln2_adc_set_port_enabled(dln2, false, NULL);
 disable_chan:
        dln2_adc_set_chan_enabled(dln2, channel, false);
-release_direct:
-       iio_device_release_direct_mode(indio_dev);
 
        return ret;
 }
@@ -337,10 +330,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret < 0)
+                       return ret;
+
                mutex_lock(&dln2->mutex);
                ret = dln2_adc_read(dln2, chan->channel);
                mutex_unlock(&dln2->mutex);
 
+               iio_device_release_direct_mode(indio_dev);
+
                if (ret < 0)
                        return ret;
 
@@ -656,7 +655,11 @@ static int dln2_adc_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        iio_trigger_set_drvdata(dln2->trig, dln2);
-       devm_iio_trigger_register(dev, dln2->trig);
+       ret = devm_iio_trigger_register(dev, dln2->trig);
+       if (ret) {
+               dev_err(dev, "failed to register trigger: %d\n", ret);
+               return ret;
+       }
        iio_trigger_set_immutable(indio_dev, dln2->trig);
 
        ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
index 6245434..8cd258c 100644 (file)
@@ -1117,6 +1117,7 @@ static void stm32h7_adc_unprepare(struct iio_dev *indio_dev)
 {
        struct stm32_adc *adc = iio_priv(indio_dev);
 
+       stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0);
        stm32h7_adc_disable(indio_dev);
        stm32_adc_int_ch_disable(adc);
        stm32h7_adc_enter_pwr_down(adc);
@@ -1986,7 +1987,7 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n
                        /* Get calibration data for vrefint channel */
                        ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint);
                        if (ret && ret != -ENOENT) {
-                               return dev_err_probe(&indio_dev->dev, ret,
+                               return dev_err_probe(indio_dev->dev.parent, ret,
                                                     "nvmem access error\n");
                        }
                        if (ret == -ENOENT)
index 3e0734d..600e972 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/bitfield.h>
+#include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
@@ -124,7 +125,7 @@ static int adxrs290_get_rate_data(struct iio_dev *indio_dev, const u8 cmd, int *
                goto err_unlock;
        }
 
-       *val = temp;
+       *val = sign_extend32(temp, 15);
 
 err_unlock:
        mutex_unlock(&st->lock);
@@ -146,7 +147,7 @@ static int adxrs290_get_temp_data(struct iio_dev *indio_dev, int *val)
        }
 
        /* extract lower 12 bits temperature reading */
-       *val = temp & 0x0FFF;
+       *val = sign_extend32(temp, 11);
 
 err_unlock:
        mutex_unlock(&st->lock);
index 04dd6a7..4cfa0d4 100644 (file)
@@ -61,9 +61,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
 
        iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
 
+error_ret:
        iio_trigger_notify_done(indio_dev->trig);
 
-error_ret:
        return IRQ_HANDLED;
 }
 
index b23caa2..93990ff 100644 (file)
@@ -556,7 +556,6 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent,
                irq_modify_status(trig->subirq_base + i,
                                  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
        }
-       get_device(&trig->dev);
 
        return trig;
 
index 7e51aaa..b2983b1 100644 (file)
@@ -1275,7 +1275,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
                ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
                                       als_buf, sizeof(als_buf));
                if (ret < 0)
-                       return ret;
+                       goto done;
                if (test_bit(0, indio_dev->active_scan_mask))
                        scan.channels[j++] = le16_to_cpu(als_buf[1]);
                if (test_bit(1, indio_dev->active_scan_mask))
index 07e9184..fc63856 100644 (file)
@@ -546,9 +546,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
        mutex_lock(&data->lock);
        ret = regmap_field_read(data->reg_flag_nf, &dir);
        if (ret < 0) {
-               dev_err(&data->client->dev, "register read failed\n");
-               mutex_unlock(&data->lock);
-               return ret;
+               dev_err(&data->client->dev, "register read failed: %d\n", ret);
+               goto out;
        }
        event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
                                     IIO_EV_TYPE_THRESH,
@@ -560,6 +559,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
        ret = regmap_field_write(data->reg_flag_psint, 0);
        if (ret < 0)
                dev_err(&data->client->dev, "failed to reset interrupts\n");
+out:
        mutex_unlock(&data->lock);
 
        return IRQ_HANDLED;
index 3308387..4353b74 100644 (file)
@@ -912,6 +912,6 @@ static struct platform_driver stm32_timer_trigger_driver = {
 };
 module_platform_driver(stm32_timer_trigger_driver);
 
-MODULE_ALIAS("platform: stm32-timer-trigger");
+MODULE_ALIAS("platform:stm32-timer-trigger");
 MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver");
 MODULE_LICENSE("GPL v2");
index b8d715c..11a0806 100644 (file)
@@ -66,7 +66,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
        struct rdma_ah_attr *src = ah_attr;
        struct rdma_ah_attr conv_ah;
 
-       memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
+       memset(&dst->grh, 0, sizeof(dst->grh));
 
        if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
            (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) &&
index 2f2c764..a02916a 100644 (file)
@@ -447,6 +447,9 @@ static int uapi_finalize(struct uverbs_api *uapi)
        uapi->num_write_ex = max_write_ex + 1;
        data = kmalloc_array(uapi->num_write + uapi->num_write_ex,
                             sizeof(*uapi->write_methods), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
        for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++)
                data[i] = &uapi->notsupp_method;
        uapi->write_methods = data;
index ec37f4f..f1245c9 100644 (file)
@@ -8415,6 +8415,8 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
  */
 static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
 {
+       if (!rcd->rcvhdrq)
+               return;
        clear_recv_intr(rcd);
        if (check_packet_present(rcd))
                force_recv_intr(rcd);
index 61f341c..e2c634a 100644 (file)
@@ -1012,6 +1012,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
        struct hfi1_packet packet;
        int skip_pkt = 0;
 
+       if (!rcd->rcvhdrq)
+               return RCV_PKT_OK;
        /* Control context will always use the slow path interrupt handler */
        needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
 
index dbd1c31..4436ed4 100644 (file)
@@ -113,7 +113,6 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd,
        rcd->fast_handler = get_dma_rtail_setting(rcd) ?
                                handle_receive_interrupt_dma_rtail :
                                handle_receive_interrupt_nodma_rtail;
-       rcd->slow_handler = handle_receive_interrupt;
 
        hfi1_set_seq_cnt(rcd, 1);
 
@@ -334,6 +333,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
                rcd->numa_id = numa;
                rcd->rcv_array_groups = dd->rcv_entries.ngroups;
                rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
+               rcd->slow_handler = handle_receive_interrupt;
+               rcd->do_interrupt = rcd->slow_handler;
                rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
 
                mutex_init(&rcd->exp_mutex);
@@ -874,18 +875,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
        if (ret)
                goto done;
 
-       /* allocate dummy tail memory for all receive contexts */
-       dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
-                                                        sizeof(u64),
-                                                        &dd->rcvhdrtail_dummy_dma,
-                                                        GFP_KERNEL);
-
-       if (!dd->rcvhdrtail_dummy_kvaddr) {
-               dd_dev_err(dd, "cannot allocate dummy tail memory\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-
        /* dd->rcd can be NULL if early initialization failed */
        for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
                /*
@@ -898,8 +887,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
                if (!rcd)
                        continue;
 
-               rcd->do_interrupt = &handle_receive_interrupt;
-
                lastfail = hfi1_create_rcvhdrq(dd, rcd);
                if (!lastfail)
                        lastfail = hfi1_setup_eagerbufs(rcd);
@@ -1120,7 +1107,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
        rcd->egrbufs.rcvtids = NULL;
 
        for (e = 0; e < rcd->egrbufs.alloced; e++) {
-               if (rcd->egrbufs.buffers[e].dma)
+               if (rcd->egrbufs.buffers[e].addr)
                        dma_free_coherent(&dd->pcidev->dev,
                                          rcd->egrbufs.buffers[e].len,
                                          rcd->egrbufs.buffers[e].addr,
@@ -1201,6 +1188,11 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
        dd->tx_opstats    = NULL;
        kfree(dd->comp_vect);
        dd->comp_vect = NULL;
+       if (dd->rcvhdrtail_dummy_kvaddr)
+               dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
+                                 (void *)dd->rcvhdrtail_dummy_kvaddr,
+                                 dd->rcvhdrtail_dummy_dma);
+       dd->rcvhdrtail_dummy_kvaddr = NULL;
        sdma_clean(dd, dd->num_sdma);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
 }
@@ -1298,6 +1290,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
                goto bail;
        }
 
+       /* allocate dummy tail memory for all receive contexts */
+       dd->rcvhdrtail_dummy_kvaddr =
+               dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
+                                  &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
+       if (!dd->rcvhdrtail_dummy_kvaddr) {
+               ret = -ENOMEM;
+               goto bail;
+       }
+
        atomic_set(&dd->ipoib_rsm_usr_num, 0);
        return dd;
 
@@ -1505,13 +1506,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
 
        free_credit_return(dd);
 
-       if (dd->rcvhdrtail_dummy_kvaddr) {
-               dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
-                                 (void *)dd->rcvhdrtail_dummy_kvaddr,
-                                 dd->rcvhdrtail_dummy_dma);
-               dd->rcvhdrtail_dummy_kvaddr = NULL;
-       }
-
        /*
         * Free any resources still in use (usually just kernel contexts)
         * at unload; we do for ctxtcnt, because that's what we allocate.
index 2b6c24b..f07d328 100644 (file)
@@ -838,8 +838,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
        if (current->nr_cpus_allowed != 1)
                goto out;
 
-       cpu_id = smp_processor_id();
        rcu_read_lock();
+       cpu_id = smp_processor_id();
        rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
                                     sdma_rht_params);
 
index 9bfbadd..eb0defa 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/acpi.h>
 #include <linux/etherdevice.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <net/addrconf.h>
@@ -1050,9 +1051,14 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
                                        unsigned long instance_stage,
                                        unsigned long reset_stage)
 {
+#define HW_RESET_TIMEOUT_US 1000000
+#define HW_RESET_SLEEP_US 1000
+
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+       unsigned long val;
+       int ret;
 
        /* When hardware reset is detected, we should stop sending mailbox&cmq&
         * doorbell to hardware. If now in .init_instance() function, we should
@@ -1064,7 +1070,11 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
         * again.
         */
        hr_dev->dis_db = true;
-       if (!ops->get_hw_reset_stat(handle))
+
+       ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
+                               val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
+                               HW_RESET_TIMEOUT_US, false, handle);
+       if (!ret)
                hr_dev->is_reset = true;
 
        if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
@@ -1584,11 +1594,17 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
 {
        struct hns_roce_cmq_desc desc;
        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+       u32 clock_cycles_of_1us;
 
        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
                                      false);
 
-       hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+               clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
+       else
+               clock_cycles_of_1us = HNS_ROCE_1US_CFG;
+
+       hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
        hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
 
        return hns_roce_cmq_send(hr_dev, &desc, 1);
@@ -4792,6 +4808,30 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
        return ret;
 }
 
+static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
+{
+#define QP_ACK_TIMEOUT_MAX_HIP08 20
+#define QP_ACK_TIMEOUT_OFFSET 10
+#define QP_ACK_TIMEOUT_MAX 31
+
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+               if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
+                       ibdev_warn(&hr_dev->ib_dev,
+                                  "Local ACK timeout shall be 0 to 20.\n");
+                       return false;
+               }
+               *timeout += QP_ACK_TIMEOUT_OFFSET;
+       } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
+               if (*timeout > QP_ACK_TIMEOUT_MAX) {
+                       ibdev_warn(&hr_dev->ib_dev,
+                                  "Local ACK timeout shall be 0 to 31.\n");
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
                                      const struct ib_qp_attr *attr,
                                      int attr_mask,
@@ -4801,6 +4841,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        int ret = 0;
+       u8 timeout;
 
        if (attr_mask & IB_QP_AV) {
                ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
@@ -4810,12 +4851,10 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
        }
 
        if (attr_mask & IB_QP_TIMEOUT) {
-               if (attr->timeout < 31) {
-                       hr_reg_write(context, QPC_AT, attr->timeout);
+               timeout = attr->timeout;
+               if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
+                       hr_reg_write(context, QPC_AT, timeout);
                        hr_reg_clear(qpc_mask, QPC_AT);
-               } else {
-                       ibdev_warn(&hr_dev->ib_dev,
-                                  "Local ACK timeout shall be 0 to 30.\n");
                }
        }
 
@@ -4872,7 +4911,9 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
                set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
 
        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
-               hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer);
+               hr_reg_write(context, QPC_MIN_RNR_TIME,
+                           hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
+                           HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
                hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
        }
 
@@ -5489,6 +5530,16 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 
        hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
        hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
+
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+               if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
+                       dev_info(hr_dev->dev,
+                                "cq_period(%u) reached the upper limit, adjusted to 65.\n",
+                                cq_period);
+                       cq_period = HNS_ROCE_MAX_CQ_PERIOD;
+               }
+               cq_period *= HNS_ROCE_CLOCK_ADJUST;
+       }
        hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
        hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
 
@@ -5884,6 +5935,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
        hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
        hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
 
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+               if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
+                       dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
+                                eq->eq_period);
+                       eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
+               }
+               eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
+       }
+
        hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
        hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
        hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
@@ -6387,10 +6447,8 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
        if (!hr_dev)
                return 0;
 
-       hr_dev->is_reset = true;
        hr_dev->active = false;
        hr_dev->dis_db = true;
-
        hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
 
        return 0;
index 4d904d5..35c61da 100644 (file)
@@ -1444,6 +1444,14 @@ struct hns_roce_dip {
        struct list_head node;  /* all dips are on a list */
 };
 
+/* only for RNR timeout issue of HIP08 */
+#define HNS_ROCE_CLOCK_ADJUST 1000
+#define HNS_ROCE_MAX_CQ_PERIOD 65
+#define HNS_ROCE_MAX_EQ_PERIOD 65
+#define HNS_ROCE_RNR_TIMER_10NS 1
+#define HNS_ROCE_1US_CFG 999
+#define HNS_ROCE_1NS_CFG 0
+
 #define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
 #define HNS_ROCE_AEQ_DEFAULT_INTERVAL  0x0
 #define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
index 6eee9de..e64ef69 100644 (file)
@@ -259,7 +259,7 @@ static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 
 static void free_srq_wrid(struct hns_roce_srq *srq)
 {
-       kfree(srq->wrid);
+       kvfree(srq->wrid);
        srq->wrid = NULL;
 }
 
index 4108dca..4b1b16e 100644 (file)
@@ -60,6 +60,8 @@ static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
 {
        struct irdma_cq *cq = iwcq->back_cq;
 
+       if (!cq->user_mode)
+               cq->armed = false;
        if (cq->ibcq.comp_handler)
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
@@ -146,6 +148,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
                qp->flush_code = FLUSH_PROT_ERR;
                break;
        case IRDMA_AE_AMP_BAD_QP:
+       case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
                qp->flush_code = FLUSH_LOC_QP_OP_ERR;
                break;
        case IRDMA_AE_AMP_BAD_STAG_KEY:
@@ -156,7 +159,6 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
        case IRDMA_AE_PRIV_OPERATION_DENIED:
        case IRDMA_AE_IB_INVALID_REQUEST:
        case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
-       case IRDMA_AE_IB_REMOTE_OP_ERROR:
                qp->flush_code = FLUSH_REM_ACCESS_ERR;
                qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
                break;
@@ -184,6 +186,9 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
        case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
                qp->flush_code = FLUSH_MW_BIND_ERR;
                break;
+       case IRDMA_AE_IB_REMOTE_OP_ERROR:
+               qp->flush_code = FLUSH_REM_OP_ERR;
+               break;
        default:
                qp->flush_code = FLUSH_FATAL_ERR;
                break;
@@ -545,7 +550,7 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
        struct irdma_sc_dev *dev = &rf->sc_dev;
 
        dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
-       irq_set_affinity_hint(msix_vec->irq, NULL);
+       irq_update_affinity_hint(msix_vec->irq, NULL);
        free_irq(msix_vec->irq, dev_id);
 }
 
@@ -1095,7 +1100,7 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
        }
        cpumask_clear(&msix_vec->mask);
        cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
-       irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
+       irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
        if (status) {
                ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
                return IRDMA_ERR_CFG;
index 91a4971..cb218ca 100644 (file)
@@ -542,6 +542,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
                    void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
                    void *cb_param);
 void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
+bool irdma_cq_empty(struct irdma_cq *iwcq);
 int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
                         void *ptr);
 int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
index aeeb1c3..fed49da 100644 (file)
@@ -25,8 +25,7 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
                list_del(&chunk->list);
                if (chunk->type == PBLE_SD_PAGED)
                        irdma_pble_free_paged_mem(chunk);
-               if (chunk->bitmapbuf)
-                       kfree(chunk->bitmapmem.va);
+               bitmap_free(chunk->bitmapbuf);
                kfree(chunk->chunkmem.va);
        }
 }
@@ -283,7 +282,6 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
                  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
                  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
        pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
-       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
                             sd_entry->u.pd_table.pd_page_addr.pa :
                             sd_entry->u.bp.addr.pa;
@@ -295,12 +293,12 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
                        goto error;
        }
 
+       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        sd_entry->valid = true;
        return 0;
 
 error:
-       if (chunk->bitmapbuf)
-               kfree(chunk->bitmapmem.va);
+       bitmap_free(chunk->bitmapbuf);
        kfree(chunk->chunkmem.va);
 
        return ret_code;
index e1b3b81..aa20827 100644 (file)
@@ -78,7 +78,6 @@ struct irdma_chunk {
        u32 pg_cnt;
        enum irdma_alloc_type type;
        struct irdma_sc_dev *dev;
-       struct irdma_virt_mem bitmapmem;
        struct irdma_virt_mem chunkmem;
 };
 
index 8b42c43..398736d 100644 (file)
@@ -2239,15 +2239,10 @@ enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
 
        sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
 
-       pchunk->bitmapmem.size = sizeofbitmap >> 3;
-       pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
-
-       if (!pchunk->bitmapmem.va)
+       pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
+       if (!pchunk->bitmapbuf)
                return IRDMA_ERR_NO_MEMORY;
 
-       pchunk->bitmapbuf = pchunk->bitmapmem.va;
-       bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
-
        pchunk->sizeofbitmap = sizeofbitmap;
        /* each pble is 8 bytes hence shift by 3 */
        pprm->total_pble_alloc += pchunk->size >> 3;
@@ -2491,3 +2486,18 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
        ibevent.element.qp = &iwqp->ibqp;
        iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
 }
+
+bool irdma_cq_empty(struct irdma_cq *iwcq)
+{
+       struct irdma_cq_uk *ukcq;
+       u64 qword3;
+       __le64 *cqe;
+       u8 polarity;
+
+       ukcq  = &iwcq->sc_cq.cq_uk;
+       cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+       get_64bit_val(cqe, 24, &qword3);
+       polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+       return polarity != ukcq->polarity;
+}
index 0f66e80..8cd5f92 100644 (file)
@@ -3584,18 +3584,31 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
        struct irdma_cq *iwcq;
        struct irdma_cq_uk *ukcq;
        unsigned long flags;
-       enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
+       enum irdma_cmpl_notify cq_notify;
+       bool promo_event = false;
+       int ret = 0;
 
+       cq_notify = notify_flags == IB_CQ_SOLICITED ?
+                   IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
        iwcq = to_iwcq(ibcq);
        ukcq = &iwcq->sc_cq.cq_uk;
-       if (notify_flags == IB_CQ_SOLICITED)
-               cq_notify = IRDMA_CQ_COMPL_SOLICITED;
 
        spin_lock_irqsave(&iwcq->lock, flags);
-       irdma_uk_cq_request_notification(ukcq, cq_notify);
+       /* Only promote to arm the CQ for any event if the last arm event was solicited. */
+       if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
+               promo_event = true;
+
+       if (!iwcq->armed || promo_event) {
+               iwcq->armed = true;
+               iwcq->last_notify = cq_notify;
+               irdma_uk_cq_request_notification(ukcq, cq_notify);
+       }
+
+       if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+               ret = 1;
        spin_unlock_irqrestore(&iwcq->lock, flags);
 
-       return 0;
+       return ret;
 }
 
 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
index 5c244cd..d0fdef8 100644 (file)
@@ -110,6 +110,8 @@ struct irdma_cq {
        u16 cq_size;
        u16 cq_num;
        bool user_mode;
+       bool armed;
+       enum irdma_cmpl_notify last_notify;
        u32 polled_cmpls;
        u32 cq_mem_size;
        struct irdma_dma_mem kmem;
index ac11943..bf2f30d 100644 (file)
@@ -941,7 +941,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                                               &addrlimit) ||
                            addrlimit > type_max(typeof(pkt->addrlimit))) {
                                ret = -EINVAL;
-                               goto free_pbc;
+                               goto free_pkt;
                        }
                        pkt->addrlimit = addrlimit;
 
index 53271df..bcf717b 100644 (file)
@@ -135,19 +135,19 @@ static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
 
        ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
        if (ret)
-               goto err_out;
+               return -ENOMEM;
 
        if (both) {
                ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
-               if (ret) {
-                       rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
-                       goto err_out;
-               }
+               if (ret)
+                       goto err_free;
        }
 
        return 0;
 
-err_out:
+err_free:
+       rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+       mr->cur_map_set = NULL;
        return -ENOMEM;
 }
 
@@ -214,7 +214,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
                                pr_warn("%s: Unable to get virtual address\n",
                                                __func__);
                                err = -ENOMEM;
-                               goto err_cleanup_map;
+                               goto err_release_umem;
                        }
 
                        buf->addr = (uintptr_t)vaddr;
@@ -237,8 +237,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
        return 0;
 
-err_cleanup_map:
-       rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
 err_release_umem:
        ib_umem_release(umem);
 err_out:
index 9753218..54b8711 100644 (file)
@@ -359,6 +359,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 
 err2:
        rxe_queue_cleanup(qp->sq.queue);
+       qp->sq.queue = NULL;
 err1:
        qp->pd = NULL;
        qp->rcq = NULL;
index f7e459f..76e4352 100644 (file)
@@ -19,7 +19,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
        int cpu;
 
        cpu = raw_smp_processor_id();
-       s = this_cpu_ptr(stats->pcpu_stats);
+       s = get_cpu_ptr(stats->pcpu_stats);
        if (con->cpu != cpu) {
                s->cpu_migr.to++;
 
@@ -27,14 +27,16 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
                s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
                atomic_inc(&s->cpu_migr.from);
        }
+       put_cpu_ptr(stats->pcpu_stats);
 }
 
 void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
 {
        struct rtrs_clt_stats_pcpu *s;
 
-       s = this_cpu_ptr(stats->pcpu_stats);
+       s = get_cpu_ptr(stats->pcpu_stats);
        s->rdma.failover_cnt++;
+       put_cpu_ptr(stats->pcpu_stats);
 }
 
 int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
@@ -169,9 +171,10 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
 {
        struct rtrs_clt_stats_pcpu *s;
 
-       s = this_cpu_ptr(stats->pcpu_stats);
+       s = get_cpu_ptr(stats->pcpu_stats);
        s->rdma.dir[d].cnt++;
        s->rdma.dir[d].size_total += size;
+       put_cpu_ptr(stats->pcpu_stats);
 }
 
 void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
index 429411c..a85a4f3 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/serio.h>
+#include <asm/unaligned.h>
 
 #define DRIVER_DESC    "SpaceTec SpaceBall 2003/3003/4000 FLX driver"
 
@@ -75,9 +76,15 @@ static void spaceball_process_packet(struct spaceball* spaceball)
 
                case 'D':                                       /* Ball data */
                        if (spaceball->idx != 15) return;
-                       for (i = 0; i < 6; i++)
+                       /*
+                        * Skip first three bytes; read six axes worth of data.
+                        * Axis values are signed 16-bit big-endian.
+                        */
+                       data += 3;
+                       for (i = 0; i < ARRAY_SIZE(spaceball_axes); i++) {
                                input_report_abs(dev, spaceball_axes[i],
-                                       (__s16)((data[2 * i + 3] << 8) | data[2 * i + 2]));
+                                       (__s16)get_unaligned_be16(&data[i * 2]));
+                       }
                        break;
 
                case 'K':                                       /* Button data */
index d57e996..23b5dd9 100644 (file)
@@ -456,9 +456,10 @@ struct iqs626_private {
        unsigned int suspend_mode;
 };
 
-static int iqs626_parse_events(struct iqs626_private *iqs626,
-                              const struct fwnode_handle *ch_node,
-                              enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_events(struct iqs626_private *iqs626,
+                   const struct fwnode_handle *ch_node,
+                   enum iqs626_ch_id ch_id)
 {
        struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
        struct i2c_client *client = iqs626->client;
@@ -604,9 +605,10 @@ static int iqs626_parse_events(struct iqs626_private *iqs626,
        return 0;
 }
 
-static int iqs626_parse_ati_target(struct iqs626_private *iqs626,
-                                  const struct fwnode_handle *ch_node,
-                                  enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_ati_target(struct iqs626_private *iqs626,
+                       const struct fwnode_handle *ch_node,
+                       enum iqs626_ch_id ch_id)
 {
        struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
        struct i2c_client *client = iqs626->client;
@@ -885,9 +887,10 @@ static int iqs626_parse_trackpad(struct iqs626_private *iqs626,
        return 0;
 }
 
-static int iqs626_parse_channel(struct iqs626_private *iqs626,
-                               const struct fwnode_handle *ch_node,
-                               enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_channel(struct iqs626_private *iqs626,
+                    const struct fwnode_handle *ch_node,
+                    enum iqs626_ch_id ch_id)
 {
        struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
        struct i2c_client *client = iqs626->client;
index bfa2665..627048b 100644 (file)
@@ -916,6 +916,8 @@ static int atp_probe(struct usb_interface *iface,
        set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
        set_bit(BTN_LEFT, input_dev->keybit);
 
+       INIT_WORK(&dev->work, atp_reinit);
+
        error = input_register_device(dev->input);
        if (error)
                goto err_free_buffer;
@@ -923,8 +925,6 @@ static int atp_probe(struct usb_interface *iface,
        /* save our data pointer in this interface device */
        usb_set_intfdata(iface, dev);
 
-       INIT_WORK(&dev->work, atp_reinit);
-
        return 0;
 
  err_free_buffer:
index 956d9cd..ece97f8 100644 (file)
@@ -1588,7 +1588,13 @@ static const struct dmi_system_id no_hw_res_dmi_table[] = {
  */
 static int elantech_change_report_id(struct psmouse *psmouse)
 {
-       unsigned char param[2] = { 0x10, 0x03 };
+       /*
+        * NOTE: the code is expecting to receive param[] as an array of 3
+        * items (see __ps2_command()), even if in this case only 2 are
+        * actually needed. Make sure the array size is 3 to avoid potential
+        * stack out-of-bound accesses.
+        */
+       unsigned char param[3] = { 0x10, 0x03 };
 
        if (elantech_write_reg_params(psmouse, 0x7, param) ||
            elantech_read_reg_params(psmouse, 0x7, param) ||
index aedd055..148a7c5 100644 (file)
@@ -995,6 +995,24 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
        { }
 };
 
+static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
+       {
+               /* ASUS ZenBook UX425UA */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+               },
+       },
+       {
+               /* ASUS ZenBook UM325UA */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+               },
+       },
+       { }
+};
+
 #endif /* CONFIG_X86 */
 
 #ifdef CONFIG_PNP
@@ -1315,6 +1333,9 @@ static int __init i8042_platform_init(void)
        if (dmi_check_system(i8042_dmi_kbdreset_table))
                i8042_kbdreset = true;
 
+       if (dmi_check_system(i8042_dmi_probe_defer_table))
+               i8042_probe_defer = true;
+
        /*
         * A20 was already enabled during early kernel init. But some buggy
         * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
index 0b9f1d0..3fc0a89 100644 (file)
@@ -45,6 +45,10 @@ static bool i8042_unlock;
 module_param_named(unlock, i8042_unlock, bool, 0);
 MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
 
+static bool i8042_probe_defer;
+module_param_named(probe_defer, i8042_probe_defer, bool, 0);
+MODULE_PARM_DESC(probe_defer, "Allow deferred probing.");
+
 enum i8042_controller_reset_mode {
        I8042_RESET_NEVER,
        I8042_RESET_ALWAYS,
@@ -711,7 +715,7 @@ static int i8042_set_mux_mode(bool multiplex, unsigned char *mux_version)
  * LCS/Telegraphics.
  */
 
-static int __init i8042_check_mux(void)
+static int i8042_check_mux(void)
 {
        unsigned char mux_version;
 
@@ -740,10 +744,10 @@ static int __init i8042_check_mux(void)
 /*
  * The following is used to test AUX IRQ delivery.
  */
-static struct completion i8042_aux_irq_delivered __initdata;
-static bool i8042_irq_being_tested __initdata;
+static struct completion i8042_aux_irq_delivered;
+static bool i8042_irq_being_tested;
 
-static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
+static irqreturn_t i8042_aux_test_irq(int irq, void *dev_id)
 {
        unsigned long flags;
        unsigned char str, data;
@@ -770,7 +774,7 @@ static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
  * verifies success by readinng CTR. Used when testing for presence of AUX
  * port.
  */
-static int __init i8042_toggle_aux(bool on)
+static int i8042_toggle_aux(bool on)
 {
        unsigned char param;
        int i;
@@ -798,7 +802,7 @@ static int __init i8042_toggle_aux(bool on)
  * the presence of an AUX interface.
  */
 
-static int __init i8042_check_aux(void)
+static int i8042_check_aux(void)
 {
        int retval = -1;
        bool irq_registered = false;
@@ -1005,7 +1009,7 @@ static int i8042_controller_init(void)
 
                if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) {
                        pr_err("Can't read CTR while initializing i8042\n");
-                       return -EIO;
+                       return i8042_probe_defer ? -EPROBE_DEFER : -EIO;
                }
 
        } while (n < 2 || ctr[0] != ctr[1]);
@@ -1320,7 +1324,7 @@ static void i8042_shutdown(struct platform_device *dev)
        i8042_controller_reset(false);
 }
 
-static int __init i8042_create_kbd_port(void)
+static int i8042_create_kbd_port(void)
 {
        struct serio *serio;
        struct i8042_port *port = &i8042_ports[I8042_KBD_PORT_NO];
@@ -1349,7 +1353,7 @@ static int __init i8042_create_kbd_port(void)
        return 0;
 }
 
-static int __init i8042_create_aux_port(int idx)
+static int i8042_create_aux_port(int idx)
 {
        struct serio *serio;
        int port_no = idx < 0 ? I8042_AUX_PORT_NO : I8042_MUX_PORT_NO + idx;
@@ -1386,13 +1390,13 @@ static int __init i8042_create_aux_port(int idx)
        return 0;
 }
 
-static void __init i8042_free_kbd_port(void)
+static void i8042_free_kbd_port(void)
 {
        kfree(i8042_ports[I8042_KBD_PORT_NO].serio);
        i8042_ports[I8042_KBD_PORT_NO].serio = NULL;
 }
 
-static void __init i8042_free_aux_ports(void)
+static void i8042_free_aux_ports(void)
 {
        int i;
 
@@ -1402,7 +1406,7 @@ static void __init i8042_free_aux_ports(void)
        }
 }
 
-static void __init i8042_register_ports(void)
+static void i8042_register_ports(void)
 {
        int i;
 
@@ -1443,7 +1447,7 @@ static void i8042_free_irqs(void)
        i8042_aux_irq_registered = i8042_kbd_irq_registered = false;
 }
 
-static int __init i8042_setup_aux(void)
+static int i8042_setup_aux(void)
 {
        int (*aux_enable)(void);
        int error;
@@ -1485,7 +1489,7 @@ static int __init i8042_setup_aux(void)
        return error;
 }
 
-static int __init i8042_setup_kbd(void)
+static int i8042_setup_kbd(void)
 {
        int error;
 
@@ -1535,7 +1539,7 @@ static int i8042_kbd_bind_notifier(struct notifier_block *nb,
        return 0;
 }
 
-static int __init i8042_probe(struct platform_device *dev)
+static int i8042_probe(struct platform_device *dev)
 {
        int error;
 
@@ -1600,6 +1604,7 @@ static struct platform_driver i8042_driver = {
                .pm     = &i8042_pm_ops,
 #endif
        },
+       .probe          = i8042_probe,
        .remove         = i8042_remove,
        .shutdown       = i8042_shutdown,
 };
@@ -1610,7 +1615,6 @@ static struct notifier_block i8042_kbd_bind_notifier_block = {
 
 static int __init i8042_init(void)
 {
-       struct platform_device *pdev;
        int err;
 
        dbg_init();
@@ -1626,17 +1630,29 @@ static int __init i8042_init(void)
        /* Set this before creating the dev to allow i8042_command to work right away */
        i8042_present = true;
 
-       pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
-       if (IS_ERR(pdev)) {
-               err = PTR_ERR(pdev);
+       err = platform_driver_register(&i8042_driver);
+       if (err)
                goto err_platform_exit;
+
+       i8042_platform_device = platform_device_alloc("i8042", -1);
+       if (!i8042_platform_device) {
+               err = -ENOMEM;
+               goto err_unregister_driver;
        }
 
+       err = platform_device_add(i8042_platform_device);
+       if (err)
+               goto err_free_device;
+
        bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
        panic_blink = i8042_panic_blink;
 
        return 0;
 
+err_free_device:
+       platform_device_put(i8042_platform_device);
+err_unregister_driver:
+       platform_driver_unregister(&i8042_driver);
  err_platform_exit:
        i8042_platform_exit();
        return err;
index 05de92c..eb66cd2 100644 (file)
@@ -1882,7 +1882,7 @@ static int mxt_read_info_block(struct mxt_data *data)
        if (error) {
                dev_err(&client->dev, "Error %d parsing object table\n", error);
                mxt_free_object_table(data);
-               goto err_free_mem;
+               return error;
        }
 
        data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
index 7e13a66..879a4d9 100644 (file)
 #define ELAN_POWERON_DELAY_USEC        500
 #define ELAN_RESET_DELAY_MSEC  20
 
+/* FW boot code version */
+#define BC_VER_H_BYTE_FOR_EKTH3900x1_I2C        0x72
+#define BC_VER_H_BYTE_FOR_EKTH3900x2_I2C        0x82
+#define BC_VER_H_BYTE_FOR_EKTH3900x3_I2C        0x92
+#define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C        0x6D
+#define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C        0x6E
+#define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C       0x77
+#define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C       0x78
+#define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB    0x67
+#define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB    0x68
+#define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB   0x74
+#define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB   0x75
+
 enum elants_chip_id {
        EKTH3500,
        EKTF3624,
@@ -736,6 +749,37 @@ static int elants_i2c_validate_remark_id(struct elants_data *ts,
        return 0;
 }
 
+static bool elants_i2c_should_check_remark_id(struct elants_data *ts)
+{
+       struct i2c_client *client = ts->client;
+       const u8 bootcode_version = ts->iap_version;
+       bool check;
+
+       /* I2C eKTH3900 and eKTH5312 are NOT support Remark ID */
+       if ((bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x1_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x2_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x3_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB)) {
+               dev_dbg(&client->dev,
+                       "eKTH3900/eKTH5312(0x%02x) are not support remark id\n",
+                       bootcode_version);
+               check = false;
+       } else if (bootcode_version >= 0x60) {
+               check = true;
+       } else {
+               check = false;
+       }
+
+       return check;
+}
+
 static int elants_i2c_do_update_firmware(struct i2c_client *client,
                                         const struct firmware *fw,
                                         bool force)
@@ -749,7 +793,7 @@ static int elants_i2c_do_update_firmware(struct i2c_client *client,
        u16 send_id;
        int page, n_fw_pages;
        int error;
-       bool check_remark_id = ts->iap_version >= 0x60;
+       bool check_remark_id = elants_i2c_should_check_remark_id(ts);
 
        /* Recovery mode detection! */
        if (force) {
index b5cc917..aaa3c45 100644 (file)
@@ -102,6 +102,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
        { .id = "911", .data = &gt911_chip_data },
        { .id = "9271", .data = &gt911_chip_data },
        { .id = "9110", .data = &gt911_chip_data },
+       { .id = "9111", .data = &gt911_chip_data },
        { .id = "927", .data = &gt911_chip_data },
        { .id = "928", .data = &gt911_chip_data },
 
@@ -650,10 +651,16 @@ int goodix_reset_no_int_sync(struct goodix_ts_data *ts)
 
        usleep_range(6000, 10000);              /* T4: > 5ms */
 
-       /* end select I2C slave addr */
-       error = gpiod_direction_input(ts->gpiod_rst);
-       if (error)
-               goto error;
+       /*
+        * Put the reset pin back in to input / high-impedance mode to save
+        * power. Only do this in the non ACPI case since some ACPI boards
+        * don't have a pull-up, so there the reset pin must stay active-high.
+        */
+       if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_GPIO) {
+               error = gpiod_direction_input(ts->gpiod_rst);
+               if (error)
+                       goto error;
+       }
 
        return 0;
 
@@ -787,6 +794,14 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
                return -EINVAL;
        }
 
+       /*
+        * Normally we put the reset pin in input / high-impedance mode to save
+        * power. But some x86/ACPI boards don't have a pull-up, so for the ACPI
+        * case, leave the pin as is. This results in the pin not being touched
+        * at all on x86/ACPI boards, except when needed for error-recover.
+        */
+       ts->gpiod_rst_flags = GPIOD_ASIS;
+
        return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping);
 }
 #else
@@ -812,6 +827,12 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
                return -EINVAL;
        dev = &ts->client->dev;
 
+       /*
+        * By default we request the reset pin as input, leaving it in
+        * high-impedance when not resetting the controller to save power.
+        */
+       ts->gpiod_rst_flags = GPIOD_IN;
+
        ts->avdd28 = devm_regulator_get(dev, "AVDD28");
        if (IS_ERR(ts->avdd28)) {
                error = PTR_ERR(ts->avdd28);
@@ -849,7 +870,7 @@ retry_get_irq_gpio:
        ts->gpiod_int = gpiod;
 
        /* Get the reset line GPIO pin number */
-       gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_IN);
+       gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags);
        if (IS_ERR(gpiod)) {
                error = PTR_ERR(gpiod);
                if (error != -EPROBE_DEFER)
index 62138f9..02065d1 100644 (file)
@@ -87,6 +87,7 @@ struct goodix_ts_data {
        struct gpio_desc *gpiod_rst;
        int gpio_count;
        int gpio_int_idx;
+       enum gpiod_flags gpiod_rst_flags;
        char id[GOODIX_ID_MAX_LEN + 1];
        char cfg_name[64];
        u16 version;
index c1e7a24..191d4f3 100644 (file)
@@ -207,7 +207,7 @@ static int goodix_firmware_upload(struct goodix_ts_data *ts)
 
        error = goodix_reset_no_int_sync(ts);
        if (error)
-               return error;
+               goto release;
 
        error = goodix_enter_upload_mode(ts->client);
        if (error)
index 3759dc3..2543ef6 100644 (file)
@@ -707,7 +707,7 @@ static const struct irq_domain_ops aic_ipi_domain_ops = {
        .free = aic_ipi_free,
 };
 
-static int aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
+static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
 {
        struct irq_domain *ipi_domain;
        int base_ipi;
index 80906bf..5b8d571 100644 (file)
@@ -232,16 +232,12 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
        int hwirq, i;
 
        mutex_lock(&msi_used_lock);
+       hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
+                                       order_base_2(nr_irqs));
+       mutex_unlock(&msi_used_lock);
 
-       hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
-                                          0, nr_irqs, 0);
-       if (hwirq >= PCI_MSI_DOORBELL_NR) {
-               mutex_unlock(&msi_used_lock);
+       if (hwirq < 0)
                return -ENOSPC;
-       }
-
-       bitmap_set(msi_used, hwirq, nr_irqs);
-       mutex_unlock(&msi_used_lock);
 
        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, hwirq + i,
@@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
                                    NULL, NULL);
        }
 
-       return hwirq;
+       return 0;
 }
 
 static void armada_370_xp_msi_free(struct irq_domain *domain,
@@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain,
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 
        mutex_lock(&msi_used_lock);
-       bitmap_clear(msi_used, d->hwirq, nr_irqs);
+       bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
        mutex_unlock(&msi_used_lock);
 }
 
index f3c6855..18b77c3 100644 (file)
@@ -76,8 +76,8 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
                generic_handle_domain_irq(scu_ic->irq_domain,
                                          bit - scu_ic->irq_shift);
 
-               regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
-                                  BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+               regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
+                                 BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
        }
 
        chained_irq_exit(chip, desc);
index d80e67a..bb6609c 100644 (file)
@@ -238,6 +238,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
        }
 
        data->num_parent_irqs = platform_irq_count(pdev);
+       put_device(&pdev->dev);
        if (data->num_parent_irqs <= 0) {
                pr_err("invalid number of parent interrupts\n");
                ret = -ENOMEM;
index ee83eb3..d25b7a8 100644 (file)
@@ -746,7 +746,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return NULL;
+       return desc->its_invall_cmd.col;
 }
 
 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
index d02b05a..ff89b36 100644 (file)
@@ -9,6 +9,7 @@
 
 #define pr_fmt(fmt) "irq-mips-gic: " fmt
 
+#include <linux/bitfield.h>
 #include <linux/bitmap.h>
 #include <linux/clocksource.h>
 #include <linux/cpuhotplug.h>
@@ -735,8 +736,7 @@ static int __init gic_of_init(struct device_node *node,
        mips_gic_base = ioremap(gic_base, gic_len);
 
        gicconfig = read_gic_config();
-       gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
-       gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
+       gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig);
        gic_shared_intrs = (gic_shared_intrs + 1) * 8;
 
        if (cpu_has_veic) {
index 63bac3f..ba4759b 100644 (file)
@@ -26,7 +26,7 @@
 
 #define NVIC_ISER              0x000
 #define NVIC_ICER              0x080
-#define NVIC_IPR               0x300
+#define NVIC_IPR               0x400
 
 #define NVIC_MAX_BANKS         16
 /*
index 55891e4..a41b4b2 100644 (file)
@@ -381,7 +381,7 @@ mISDNInit(void)
        err = mISDN_inittimer(&debug);
        if (err)
                goto error2;
-       err = l1_init(&debug);
+       err = Isdnl1_Init(&debug);
        if (err)
                goto error3;
        err = Isdnl2_Init(&debug);
@@ -395,7 +395,7 @@ mISDNInit(void)
 error5:
        Isdnl2_cleanup();
 error4:
-       l1_cleanup();
+       Isdnl1_cleanup();
 error3:
        mISDN_timer_cleanup();
 error2:
@@ -408,7 +408,7 @@ static void mISDN_cleanup(void)
 {
        misdn_sock_cleanup();
        Isdnl2_cleanup();
-       l1_cleanup();
+       Isdnl1_cleanup();
        mISDN_timer_cleanup();
        class_unregister(&mISDN_class);
 
index 23b44d3..42599f4 100644 (file)
@@ -60,8 +60,8 @@ struct Bprotocol      *get_Bprotocol4id(u_int);
 extern int     mISDN_inittimer(u_int *);
 extern void    mISDN_timer_cleanup(void);
 
-extern int     l1_init(u_int *);
-extern void    l1_cleanup(void);
+extern int     Isdnl1_Init(u_int *);
+extern void    Isdnl1_cleanup(void);
 extern int     Isdnl2_Init(u_int *);
 extern void    Isdnl2_cleanup(void);
 
index 98a3bc6..7b31c25 100644 (file)
@@ -398,7 +398,7 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
 EXPORT_SYMBOL(create_l1);
 
 int
-l1_init(u_int *deb)
+Isdnl1_Init(u_int *deb)
 {
        debug = deb;
        l1fsm_s.state_count = L1S_STATE_COUNT;
@@ -409,7 +409,7 @@ l1_init(u_int *deb)
 }
 
 void
-l1_cleanup(void)
+Isdnl1_cleanup(void)
 {
        mISDN_FsmFree(&l1fsm_s);
 }
index 78073ad..16982c1 100644 (file)
@@ -1298,7 +1298,7 @@ static int flexrm_startup(struct mbox_chan *chan)
        val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
        cpumask_set_cpu((ring->num / val) % num_online_cpus(),
                        &ring->irq_aff_hint);
-       ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
+       ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
        if (ret) {
                dev_err(ring->mbox->dev,
                        "failed to set IRQ affinity hint for ring%d\n",
@@ -1425,7 +1425,7 @@ static void flexrm_shutdown(struct mbox_chan *chan)
 
        /* Release IRQ */
        if (ring->irq_requested) {
-               irq_set_affinity_hint(ring->irq, NULL);
+               irq_update_affinity_hint(ring->irq, NULL);
                free_irq(ring->irq, ring);
                ring->irq_requested = false;
        }
index 86b9e35..140f35d 100644 (file)
@@ -1139,6 +1139,7 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
 static void cached_dev_detach_finish(struct work_struct *w)
 {
        struct cached_dev *dc = container_of(w, struct cached_dev, detach);
+       struct cache_set *c = dc->disk.c;
 
        BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
        BUG_ON(refcount_read(&dc->count));
@@ -1156,7 +1157,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
 
        bcache_device_detach(&dc->disk);
        list_move(&dc->list, &uncached_devices);
-       calc_cached_dev_sectors(dc->disk.c);
+       calc_cached_dev_sectors(c);
 
        clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
        clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
index 6319dec..7af242d 100644 (file)
@@ -1963,7 +1963,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
                n_sectors -= bv.bv_len >> SECTOR_SHIFT;
                bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
 retry_kmap:
-               mem = bvec_kmap_local(&bv);
+               mem = kmap_local_page(bv.bv_page);
                if (likely(dio->op == REQ_OP_WRITE))
                        flush_dcache_page(bv.bv_page);
 
index 5111ed9..41d6e23 100644 (file)
@@ -2189,6 +2189,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
 
                if (!num_sectors || num_sectors > max_sectors)
                        num_sectors = max_sectors;
+               rdev->sb_start = sb_start;
        }
        sb = page_address(rdev->sb_page);
        sb->data_size = cpu_to_le64(num_sectors);
@@ -6270,7 +6271,8 @@ static void __md_stop(struct mddev *mddev)
        spin_lock(&mddev->lock);
        mddev->pers = NULL;
        spin_unlock(&mddev->lock);
-       pers->free(mddev, mddev->private);
+       if (mddev->private)
+               pers->free(mddev, mddev->private);
        mddev->private = NULL;
        if (pers->sync_request && mddev->to_remove == NULL)
                mddev->to_remove = &md_redundancy_group;
index 7053233..cb670f1 100644 (file)
@@ -423,9 +423,9 @@ static int rebalance_children(struct shadow_spine *s,
 
                memcpy(n, dm_block_data(child),
                       dm_bm_block_size(dm_tm_get_bm(info->tm)));
-               dm_tm_unlock(info->tm, child);
 
                dm_tm_dec(info->tm, dm_block_location(child));
+               dm_tm_unlock(info->tm, child);
                return 0;
        }
 
index 7dc8026..8550542 100644 (file)
@@ -1496,12 +1496,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                if (!r1_bio->bios[i])
                        continue;
 
-               if (first_clone && test_bit(WriteMostly, &rdev->flags)) {
+               if (first_clone) {
                        /* do behind I/O ?
                         * Not if there are too many, or cannot
                         * allocate memory, or a reader on WriteMostly
                         * is waiting for behind writes to flush */
                        if (bitmap &&
+                           test_bit(WriteMostly, &rdev->flags) &&
                            (atomic_read(&bitmap->behind_writes)
                             < mddev->bitmap_info.max_write_behind) &&
                            !waitqueue_active(&bitmap->behind_wait)) {
index 8c72eb5..6ac509c 100644 (file)
@@ -1803,8 +1803,6 @@ static int rtsx_pci_runtime_suspend(struct device *device)
        mutex_lock(&pcr->pcr_mutex);
        rtsx_pci_power_off(pcr, HOST_ENTER_S3);
 
-       free_irq(pcr->irq, (void *)pcr);
-
        mutex_unlock(&pcr->pcr_mutex);
 
        pcr->is_runtime_suspended = true;
@@ -1825,8 +1823,6 @@ static int rtsx_pci_runtime_resume(struct device *device)
        mutex_lock(&pcr->pcr_mutex);
 
        rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
-       rtsx_pci_acquire_irq(pcr);
-       synchronize_irq(pcr->irq);
 
        if (pcr->ops->fetch_vendor_settings)
                pcr->ops->fetch_vendor_settings(pcr);
index 6323254..b38978a 100644 (file)
@@ -376,7 +376,6 @@ MODULE_DEVICE_TABLE(spi, at25_spi_ids);
 static int at25_probe(struct spi_device *spi)
 {
        struct at25_data        *at25 = NULL;
-       struct spi_eeprom       chip;
        int                     err;
        int                     sr;
        u8 id[FM25_ID_LEN];
@@ -389,15 +388,18 @@ static int at25_probe(struct spi_device *spi)
        if (match && !strcmp(match->compatible, "cypress,fm25"))
                is_fram = 1;
 
+       at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
+       if (!at25)
+               return -ENOMEM;
+
        /* Chip description */
-       if (!spi->dev.platform_data) {
-               if (!is_fram) {
-                       err = at25_fw_to_chip(&spi->dev, &chip);
-                       if (err)
-                               return err;
-               }
-       } else
-               chip = *(struct spi_eeprom *)spi->dev.platform_data;
+       if (spi->dev.platform_data) {
+               memcpy(&at25->chip, spi->dev.platform_data, sizeof(at25->chip));
+       } else if (!is_fram) {
+               err = at25_fw_to_chip(&spi->dev, &at25->chip);
+               if (err)
+                       return err;
+       }
 
        /* Ping the chip ... the status register is pretty portable,
         * unlike probing manufacturer IDs.  We do expect that system
@@ -409,12 +411,7 @@ static int at25_probe(struct spi_device *spi)
                return -ENXIO;
        }
 
-       at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
-       if (!at25)
-               return -ENOMEM;
-
        mutex_init(&at25->lock);
-       at25->chip = chip;
        at25->spi = spi;
        spi_set_drvdata(spi, at25);
 
@@ -431,7 +428,7 @@ static int at25_probe(struct spi_device *spi)
                        dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]);
                        return -ENODEV;
                }
-               chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
+               at25->chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
 
                if (at25->chip.byte_len > 64 * 1024)
                        at25->chip.flags |= EE_ADDR3;
@@ -464,7 +461,7 @@ static int at25_probe(struct spi_device *spi)
        at25->nvmem_config.type = is_fram ? NVMEM_TYPE_FRAM : NVMEM_TYPE_EEPROM;
        at25->nvmem_config.name = dev_name(&spi->dev);
        at25->nvmem_config.dev = &spi->dev;
-       at25->nvmem_config.read_only = chip.flags & EE_READONLY;
+       at25->nvmem_config.read_only = at25->chip.flags & EE_READONLY;
        at25->nvmem_config.root_only = true;
        at25->nvmem_config.owner = THIS_MODULE;
        at25->nvmem_config.compat = true;
@@ -474,17 +471,18 @@ static int at25_probe(struct spi_device *spi)
        at25->nvmem_config.priv = at25;
        at25->nvmem_config.stride = 1;
        at25->nvmem_config.word_size = 1;
-       at25->nvmem_config.size = chip.byte_len;
+       at25->nvmem_config.size = at25->chip.byte_len;
 
        at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
        if (IS_ERR(at25->nvmem))
                return PTR_ERR(at25->nvmem);
 
        dev_info(&spi->dev, "%d %s %s %s%s, pagesize %u\n",
-                (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024),
-                (chip.byte_len < 1024) ? "Byte" : "KByte",
+                (at25->chip.byte_len < 1024) ?
+                       at25->chip.byte_len : (at25->chip.byte_len / 1024),
+                (at25->chip.byte_len < 1024) ? "Byte" : "KByte",
                 at25->chip.name, is_fram ? "fram" : "eeprom",
-                (chip.flags & EE_READONLY) ? " (readonly)" : "",
+                (at25->chip.flags & EE_READONLY) ? " (readonly)" : "",
                 at25->chip.page_size);
        return 0;
 }
index 39aca77..4ccbf43 100644 (file)
@@ -719,16 +719,18 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
 static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
 {
        u64 size = 0;
-       int i;
+       int oix;
 
        size = ALIGN(metalen, FASTRPC_ALIGN);
-       for (i = 0; i < ctx->nscalars; i++) {
+       for (oix = 0; oix < ctx->nbufs; oix++) {
+               int i = ctx->olaps[oix].raix;
+
                if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
 
-                       if (ctx->olaps[i].offset == 0)
+                       if (ctx->olaps[oix].offset == 0)
                                size = ALIGN(size, FASTRPC_ALIGN);
 
-                       size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
+                       size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
                }
        }
 
index 240c5af..368f104 100644 (file)
@@ -2264,7 +2264,7 @@ void mmc_start_host(struct mmc_host *host)
        _mmc_detect_change(host, 0, false);
 }
 
-void mmc_stop_host(struct mmc_host *host)
+void __mmc_stop_host(struct mmc_host *host)
 {
        if (host->slot.cd_irq >= 0) {
                mmc_gpio_set_cd_wake(host, false);
@@ -2273,6 +2273,11 @@ void mmc_stop_host(struct mmc_host *host)
 
        host->rescan_disable = 1;
        cancel_delayed_work_sync(&host->detect);
+}
+
+void mmc_stop_host(struct mmc_host *host)
+{
+       __mmc_stop_host(host);
 
        /* clear pm flags now and let card drivers set them as needed */
        host->pm_flags = 0;
index 7931a4f..f5f3f62 100644 (file)
@@ -70,6 +70,7 @@ static inline void mmc_delay(unsigned int ms)
 
 void mmc_rescan(struct work_struct *work);
 void mmc_start_host(struct mmc_host *host);
+void __mmc_stop_host(struct mmc_host *host);
 void mmc_stop_host(struct mmc_host *host);
 
 void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
index d4683b1..cf140f4 100644 (file)
@@ -80,9 +80,18 @@ static void mmc_host_classdev_release(struct device *dev)
        kfree(host);
 }
 
+static int mmc_host_classdev_shutdown(struct device *dev)
+{
+       struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+       __mmc_stop_host(host);
+       return 0;
+}
+
 static struct class mmc_host_class = {
        .name           = "mmc_host",
        .dev_release    = mmc_host_classdev_release,
+       .shutdown_pre   = mmc_host_classdev_shutdown,
        .pm             = MMC_HOST_CLASS_DEV_PM_OPS,
 };
 
index 7cd9c0e..8fdd0bb 100644 (file)
@@ -135,6 +135,7 @@ static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
                                    struct mmc_command *cmd)
 {
        struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+       bool manual_stop = false;
        u32 ictl, send;
        int pack_len;
 
@@ -172,12 +173,27 @@ static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
                else
                        /* software flush: */
                        ictl |= MESON_SDHC_ICTL_DATA_XFER_OK;
+
+               /*
+                * Mimic the logic from the vendor driver where (only)
+                * SD_IO_RW_EXTENDED commands with more than one block set the
+                * MESON_SDHC_MISC_MANUAL_STOP bit. This fixes the firmware
+                * download in the brcmfmac driver for a BCM43362/1 card.
+                * Without this sdio_memcpy_toio() (with a size of 219557
+                * bytes) times out if MESON_SDHC_MISC_MANUAL_STOP is not set.
+                */
+               manual_stop = cmd->data->blocks > 1 &&
+                             cmd->opcode == SD_IO_RW_EXTENDED;
        } else {
                pack_len = 0;
 
                ictl |= MESON_SDHC_ICTL_RESP_OK;
        }
 
+       regmap_update_bits(host->regmap, MESON_SDHC_MISC,
+                          MESON_SDHC_MISC_MANUAL_STOP,
+                          manual_stop ? MESON_SDHC_MISC_MANUAL_STOP : 0);
+
        if (cmd->opcode == MMC_STOP_TRANSMISSION)
                send |= MESON_SDHC_SEND_DATA_STOP;
 
index fdaa11f..a75d3dd 100644 (file)
@@ -441,6 +441,8 @@ static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
                return -EINVAL;
        }
 
+       writel_relaxed(0, dlyb->base + DLYB_CR);
+
        phase = end_of_len - max_len / 2;
        sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
 
index 943940b..6327752 100644 (file)
@@ -2291,8 +2291,10 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
                        sdr_set_field(host->base + PAD_DS_TUNE,
                                      PAD_DS_TUNE_DLY1, i);
                ret = mmc_get_ext_csd(card, &ext_csd);
-               if (!ret)
+               if (!ret) {
                        result_dly1 |= (1 << i);
+                       kfree(ext_csd);
+               }
        }
        host->hs400_tuning = false;
 
index a4407f3..f5b2684 100644 (file)
@@ -673,7 +673,7 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
 
        /* Issue CMD19 twice for each tap */
        for (i = 0; i < 2 * priv->tap_num; i++) {
-               int cmd_error;
+               int cmd_error = 0;
 
                /* Set sampling clock position */
                sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
index a500187..9762ffa 100644 (file)
@@ -356,23 +356,6 @@ static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
        }
 }
 
-static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
-                                             struct mmc_ios *ios)
-{
-       struct sdhci_host *host = mmc_priv(mmc);
-       u32 val;
-
-       val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
-
-       if (ios->enhanced_strobe)
-               val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
-       else
-               val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
-
-       sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
-
-}
-
 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -793,6 +776,32 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
        }
 }
 
+static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
+                                             struct mmc_ios *ios)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       u32 val;
+
+       val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+
+       if (ios->enhanced_strobe) {
+               val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+               /*
+                * When CMD13 is sent from mmc_select_hs400es() after
+                * switching to HS400ES mode, the bus is operating at
+                * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
+                * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
+                * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
+                * controller CAR clock and the interface clock are rate matched.
+                */
+               tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
+       } else {
+               val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+       }
+
+       sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+}
+
 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
index 9802e26..2b317ed 100644 (file)
@@ -96,6 +96,13 @@ struct dataflash {
        struct mtd_info         mtd;
 };
 
+static const struct spi_device_id dataflash_dev_ids[] = {
+       { "at45" },
+       { "dataflash" },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, dataflash_dev_ids);
+
 #ifdef CONFIG_OF
 static const struct of_device_id dataflash_dt_ids[] = {
        { .compatible = "atmel,at45", },
@@ -927,6 +934,7 @@ static struct spi_driver dataflash_driver = {
                .name           = "mtd_dataflash",
                .of_match_table = of_match_ptr(dataflash_dt_ids),
        },
+       .id_table = dataflash_dev_ids,
 
        .probe          = dataflash_probe,
        .remove         = dataflash_remove,
index 67b7cb6..0a45d3c 100644 (file)
@@ -26,7 +26,7 @@ config MTD_NAND_DENALI_PCI
 config MTD_NAND_DENALI_DT
        tristate "Denali NAND controller as a DT device"
        select MTD_NAND_DENALI
-       depends on HAS_DMA && HAVE_CLK && OF
+       depends on HAS_DMA && HAVE_CLK && OF && HAS_IOMEM
        help
          Enable the driver for NAND flash on platforms using a Denali NAND
          controller as a DT device.
index 658f0cb..6b2bda8 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
 
 #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
 
+/*
+ * According to SPEAr300 Reference Manual (RM0082)
+ *  TOUDEL = 7ns (Output delay from the flip-flops to the board)
+ *  TINDEL = 5ns (Input delay from the board to the flipflop)
+ */
+#define TOUTDEL        7000
+#define TINDEL 5000
+
 struct fsmc_nand_timings {
        u8 tclr;
        u8 tar;
@@ -277,7 +286,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
 {
        unsigned long hclk = clk_get_rate(host->clk);
        unsigned long hclkn = NSEC_PER_SEC / hclk;
-       u32 thiz, thold, twait, tset;
+       u32 thiz, thold, twait, tset, twait_min;
 
        if (sdrt->tRC_min < 30000)
                return -EOPNOTSUPP;
@@ -309,13 +318,6 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
        else if (tims->thold > FSMC_THOLD_MASK)
                tims->thold = FSMC_THOLD_MASK;
 
-       twait = max(sdrt->tRP_min, sdrt->tWP_min);
-       tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
-       if (tims->twait == 0)
-               tims->twait = 1;
-       else if (tims->twait > FSMC_TWAIT_MASK)
-               tims->twait = FSMC_TWAIT_MASK;
-
        tset = max(sdrt->tCS_min - sdrt->tWP_min,
                   sdrt->tCEA_max - sdrt->tREA_max);
        tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
@@ -324,6 +326,21 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
        else if (tims->tset > FSMC_TSET_MASK)
                tims->tset = FSMC_TSET_MASK;
 
+       /*
+        * According to SPEAr300 Reference Manual (RM0082) which gives more
+        * information related to FSMSC timings than the SPEAr600 one (RM0305),
+        *   twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
+        */
+       twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
+                   + TOUTDEL + TINDEL;
+       twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
+
+       tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
+       if (tims->twait == 0)
+               tims->twait = 1;
+       else if (tims->twait > FSMC_TWAIT_MASK)
+               tims->twait = FSMC_TWAIT_MASK;
+
        return 0;
 }
 
@@ -664,6 +681,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
                                                instr->ctx.waitrdy.timeout_ms);
                        break;
                }
+
+               if (instr->delay_ns)
+                       ndelay(instr->delay_ns);
        }
 
        return ret;
index 3d6c6e8..a130320 100644 (file)
@@ -926,7 +926,7 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip,
                                 struct nand_sdr_timings *spec_timings)
 {
        const struct nand_controller_ops *ops = chip->controller->ops;
-       int best_mode = 0, mode, ret;
+       int best_mode = 0, mode, ret = -EOPNOTSUPP;
 
        iface->type = NAND_SDR_IFACE;
 
@@ -977,7 +977,7 @@ int nand_choose_best_nvddr_timings(struct nand_chip *chip,
                                   struct nand_nvddr_timings *spec_timings)
 {
        const struct nand_controller_ops *ops = chip->controller->ops;
-       int best_mode = 0, mode, ret;
+       int best_mode = 0, mode, ret = -EOPNOTSUPP;
 
        iface->type = NAND_NVDDR_IFACE;
 
@@ -1837,7 +1837,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
                        NAND_OP_CMD(NAND_CMD_ERASE1, 0),
                        NAND_OP_ADDR(2, addrs, 0),
                        NAND_OP_CMD(NAND_CMD_ERASE2,
-                                   NAND_COMMON_TIMING_MS(conf, tWB_max)),
+                                   NAND_COMMON_TIMING_NS(conf, tWB_max)),
                        NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
                                         0),
                };
index 2ec8e01..533e476 100644 (file)
@@ -1501,14 +1501,14 @@ void bond_alb_monitor(struct work_struct *work)
        struct slave *slave;
 
        if (!bond_has_slaves(bond)) {
-               bond_info->tx_rebalance_counter = 0;
+               atomic_set(&bond_info->tx_rebalance_counter, 0);
                bond_info->lp_counter = 0;
                goto re_arm;
        }
 
        rcu_read_lock();
 
-       bond_info->tx_rebalance_counter++;
+       atomic_inc(&bond_info->tx_rebalance_counter);
        bond_info->lp_counter++;
 
        /* send learning packets */
@@ -1530,7 +1530,7 @@ void bond_alb_monitor(struct work_struct *work)
        }
 
        /* rebalance tx traffic */
-       if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
+       if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
                bond_for_each_slave_rcu(bond, slave, iter) {
                        tlb_clear_slave(bond, slave, 1);
                        if (slave == rcu_access_pointer(bond->curr_active_slave)) {
@@ -1540,7 +1540,7 @@ void bond_alb_monitor(struct work_struct *work)
                                bond_info->unbalanced_load = 0;
                        }
                }
-               bond_info->tx_rebalance_counter = 0;
+               atomic_set(&bond_info->tx_rebalance_counter, 0);
        }
 
        if (bond_info->rlb_enabled) {
@@ -1610,7 +1610,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
        tlb_init_slave(slave);
 
        /* order a rebalance ASAP */
-       bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+       atomic_set(&bond->alb_info.tx_rebalance_counter,
+                  BOND_TLB_REBALANCE_TICKS);
 
        if (bond->alb_info.rlb_enabled)
                bond->alb_info.rlb_rebalance = 1;
@@ -1647,7 +1648,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
                        rlb_clear_slave(bond, slave);
        } else if (link == BOND_LINK_UP) {
                /* order a rebalance ASAP */
-               bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+               atomic_set(&bond_info->tx_rebalance_counter,
+                          BOND_TLB_REBALANCE_TICKS);
                if (bond->alb_info.rlb_enabled) {
                        bond->alb_info.rlb_rebalance = 1;
                        /* If the updelay module parameter is smaller than the
index a8fde3b..b93337b 100644 (file)
@@ -1526,7 +1526,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
                mac = (u8 *)&newval->value;
        }
 
-       if (!is_valid_ether_addr(mac))
+       if (is_multicast_ether_addr(mac))
                goto err;
 
        netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
index 74d9899..eb74cdf 100644 (file)
@@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 #define KVASER_PCIEFD_SPACK_EWLR BIT(23)
 #define KVASER_PCIEFD_SPACK_EPLR BIT(24)
 
+/* Kvaser KCAN_EPACK second word */
+#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
+
 struct kvaser_pciefd;
 
 struct kvaser_pciefd_can {
@@ -1285,7 +1288,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
 
        can->err_rep_cnt++;
        can->can.can_stats.bus_error++;
-       stats->rx_errors++;
+       if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
+               stats->tx_errors++;
+       else
+               stats->rx_errors++;
 
        can->bec.txerr = bec.txerr;
        can->bec.rxerr = bec.rxerr;
index 2470c47..c2a8421 100644 (file)
@@ -204,16 +204,16 @@ enum m_can_reg {
 
 /* Interrupts for version 3.0.x */
 #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
-#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
-                        IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
-                        IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
+                        IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+                        IR_RF0L)
 #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
 
 /* Interrupts for version >= 3.1.x */
 #define IR_ERR_LEC_31X (IR_PED | IR_PEA)
-#define IR_ERR_BUS_31X      (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
-                        IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
-                        IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_31X      (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
+                        IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+                        IR_RF0L)
 #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
 
 /* Interrupt Line Select (ILS) */
@@ -517,7 +517,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
                err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
                                      cf->data, DIV_ROUND_UP(cf->len, 4));
                if (err)
-                       goto out_fail;
+                       goto out_free_skb;
        }
 
        /* acknowledge rx fifo 0 */
@@ -532,6 +532,8 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
 
        return 0;
 
+out_free_skb:
+       kfree_skb(skb);
 out_fail:
        netdev_err(dev, "FIFO read returned %d\n", err);
        return err;
@@ -810,8 +812,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
 {
        if (irqstatus & IR_WDI)
                netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
-       if (irqstatus & IR_ELO)
-               netdev_err(dev, "Error Logging Overflow\n");
        if (irqstatus & IR_BEU)
                netdev_err(dev, "Bit Error Uncorrected\n");
        if (irqstatus & IR_BEC)
@@ -1494,20 +1494,32 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
        case 30:
                /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
                can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
-               cdev->can.bittiming_const = &m_can_bittiming_const_30X;
-               cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+               cdev->can.bittiming_const = cdev->bit_timing ?
+                       cdev->bit_timing : &m_can_bittiming_const_30X;
+
+               cdev->can.data_bittiming_const = cdev->data_timing ?
+                       cdev->data_timing :
+                       &m_can_data_bittiming_const_30X;
                break;
        case 31:
                /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
                can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
-               cdev->can.bittiming_const = &m_can_bittiming_const_31X;
-               cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+               cdev->can.bittiming_const = cdev->bit_timing ?
+                       cdev->bit_timing : &m_can_bittiming_const_31X;
+
+               cdev->can.data_bittiming_const = cdev->data_timing ?
+                       cdev->data_timing :
+                       &m_can_data_bittiming_const_31X;
                break;
        case 32:
        case 33:
                /* Support both MCAN version v3.2.x and v3.3.0 */
-               cdev->can.bittiming_const = &m_can_bittiming_const_31X;
-               cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+               cdev->can.bittiming_const = cdev->bit_timing ?
+                       cdev->bit_timing : &m_can_bittiming_const_31X;
+
+               cdev->can.data_bittiming_const = cdev->data_timing ?
+                       cdev->data_timing :
+                       &m_can_data_bittiming_const_31X;
 
                cdev->can.ctrlmode_supported |=
                        (m_can_niso_supported(cdev) ?
index d18b515..2c5d409 100644 (file)
@@ -85,6 +85,9 @@ struct m_can_classdev {
        struct sk_buff *tx_skb;
        struct phy *transceiver;
 
+       const struct can_bittiming_const *bit_timing;
+       const struct can_bittiming_const *data_timing;
+
        struct m_can_ops *ops;
 
        int version;
index 89cc3d4..b56a54d 100644 (file)
 
 #define M_CAN_PCI_MMIO_BAR             0
 
-#define M_CAN_CLOCK_FREQ_EHL           100000000
 #define CTL_CSR_INT_CTL_OFFSET         0x508
 
+struct m_can_pci_config {
+       const struct can_bittiming_const *bit_timing;
+       const struct can_bittiming_const *data_timing;
+       unsigned int clock_freq;
+};
+
 struct m_can_pci_priv {
        struct m_can_classdev cdev;
 
@@ -42,8 +47,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
 static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
 {
        struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+       void __iomem *src = priv->base + offset;
 
-       ioread32_rep(priv->base + offset, val, val_count);
+       while (val_count--) {
+               *(unsigned int *)val = ioread32(src);
+               val += 4;
+               src += 4;
+       }
 
        return 0;
 }
@@ -61,8 +71,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
                            const void *val, size_t val_count)
 {
        struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+       void __iomem *dst = priv->base + offset;
 
-       iowrite32_rep(priv->base + offset, val, val_count);
+       while (val_count--) {
+               iowrite32(*(unsigned int *)val, dst);
+               val += 4;
+               dst += 4;
+       }
 
        return 0;
 }
@@ -74,9 +89,40 @@ static struct m_can_ops m_can_pci_ops = {
        .read_fifo = iomap_read_fifo,
 };
 
+static const struct can_bittiming_const m_can_bittiming_const_ehl = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
+       .tseg1_max = 64,
+       .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
+       .tseg2_max = 128,
+       .sjw_max = 128,
+       .brp_min = 1,
+       .brp_max = 512,
+       .brp_inc = 1,
+};
+
+static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
+       .tseg1_max = 16,
+       .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 32,
+       .brp_inc = 1,
+};
+
+static const struct m_can_pci_config m_can_pci_ehl = {
+       .bit_timing = &m_can_bittiming_const_ehl,
+       .data_timing = &m_can_data_bittiming_const_ehl,
+       .clock_freq = 200000000,
+};
+
 static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 {
        struct device *dev = &pci->dev;
+       const struct m_can_pci_config *cfg;
        struct m_can_classdev *mcan_class;
        struct m_can_pci_priv *priv;
        void __iomem *base;
@@ -104,6 +150,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
        if (!mcan_class)
                return -ENOMEM;
 
+       cfg = (const struct m_can_pci_config *)id->driver_data;
+
        priv = cdev_to_priv(mcan_class);
 
        priv->base = base;
@@ -115,7 +163,9 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
        mcan_class->dev = &pci->dev;
        mcan_class->net->irq = pci_irq_vector(pci, 0);
        mcan_class->pm_clock_support = 1;
-       mcan_class->can.clock.freq = id->driver_data;
+       mcan_class->bit_timing = cfg->bit_timing;
+       mcan_class->data_timing = cfg->data_timing;
+       mcan_class->can.clock.freq = cfg->clock_freq;
        mcan_class->ops = &m_can_pci_ops;
 
        pci_set_drvdata(pci, mcan_class);
@@ -168,8 +218,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
                         m_can_pci_suspend, m_can_pci_resume);
 
 static const struct pci_device_id m_can_pci_id_table[] = {
-       { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
-       { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
+       { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
+       { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
        {  }    /* Terminating Entry */
 };
 MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
index 92a54a5..964c8a0 100644 (file)
@@ -692,11 +692,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
                        cf->data[i + 1] = data_reg >> 8;
                }
 
-               netif_receive_skb(skb);
                rcv_pkts++;
                stats->rx_packets++;
                quota--;
                stats->rx_bytes += cf->len;
+               netif_receive_skb(skb);
 
                pch_fifo_thresh(priv, obj_num);
                obj_num++;
index e21b169..4642b6d 100644 (file)
@@ -234,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
                        free_sja1000dev(dev);
        }
 
-       err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
+       if (!card->channels) {
+               err = -ENODEV;
+               goto failure_cleanup;
+       }
+
+       err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
                          DRV_NAME, card);
        if (!err)
                return 0;
index 59ba7c7..f7af1bf 100644 (file)
 
 #include "kvaser_usb.h"
 
-/* Forward declaration */
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
-
-#define CAN_USB_CLOCK                  8000000
 #define MAX_USBCAN_NET_DEVICES         2
 
 /* Command header size */
@@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
 
 #define CMD_LEAF_LOG_MESSAGE           106
 
+/* Leaf frequency options */
+#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60
+#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0
+#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
+#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+
 /* error factors */
 #define M16C_EF_ACKE                   BIT(0)
 #define M16C_EF_CRCE                   BIT(1)
@@ -340,6 +342,50 @@ struct kvaser_usb_err_summary {
        };
 };
 
+static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
+       .name = "kvaser_usb",
+       .tseg1_min = KVASER_USB_TSEG1_MIN,
+       .tseg1_max = KVASER_USB_TSEG1_MAX,
+       .tseg2_min = KVASER_USB_TSEG2_MIN,
+       .tseg2_max = KVASER_USB_TSEG2_MAX,
+       .sjw_max = KVASER_USB_SJW_MAX,
+       .brp_min = KVASER_USB_BRP_MIN,
+       .brp_max = KVASER_USB_BRP_MAX,
+       .brp_inc = KVASER_USB_BRP_INC,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+       .clock = {
+               .freq = 8000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+       .clock = {
+               .freq = 16000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+       .clock = {
+               .freq = 24000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+       .clock = {
+               .freq = 32000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
 static void *
 kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
                             const struct sk_buff *skb, int *frame_len,
@@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
        return rc;
 }
 
+static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
+                                                  const struct leaf_cmd_softinfo *softinfo)
+{
+       u32 sw_options = le32_to_cpu(softinfo->sw_options);
+
+       dev->fw_version = le32_to_cpu(softinfo->fw_version);
+       dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+
+       switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+       case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
+               break;
+       case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
+               break;
+       case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
+               break;
+       }
+}
+
 static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
 {
        struct kvaser_cmd cmd;
@@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
 
        switch (dev->card_data.leaf.family) {
        case KVASER_LEAF:
-               dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
-               dev->max_tx_urbs =
-                       le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
+               kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
                break;
        case KVASER_USBCAN:
                dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
                dev->max_tx_urbs =
                        le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
                break;
        }
 
@@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
 {
        struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
 
-       dev->cfg = &kvaser_usb_leaf_dev_cfg;
        card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
 
        return 0;
 }
 
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
-       .name = "kvaser_usb",
-       .tseg1_min = KVASER_USB_TSEG1_MIN,
-       .tseg1_max = KVASER_USB_TSEG1_MAX,
-       .tseg2_min = KVASER_USB_TSEG2_MIN,
-       .tseg2_max = KVASER_USB_TSEG2_MAX,
-       .sjw_max = KVASER_USB_SJW_MAX,
-       .brp_min = KVASER_USB_BRP_MIN,
-       .brp_max = KVASER_USB_BRP_MAX,
-       .brp_inc = KVASER_USB_BRP_INC,
-};
-
 static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
 {
        struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
@@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
        .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
        .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
 };
-
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
-       .clock = {
-               .freq = CAN_USB_CLOCK,
-       },
-       .timestamp_freq = 1,
-       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
-};
index f00cbf5..cd8462d 100644 (file)
@@ -471,6 +471,12 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
        u16 reg;
        int err;
 
+       /* The 88e6250 family does not have the PHY detect bit. Instead,
+        * report whether the port is internal.
+        */
+       if (chip->info->family == MV88E6XXX_FAMILY_6250)
+               return port < chip->info->num_internal_phys;
+
        err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
        if (err) {
                dev_err(chip->dev,
@@ -692,44 +698,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
 {
        struct mv88e6xxx_chip *chip = ds->priv;
        struct mv88e6xxx_port *p;
-       int err;
+       int err = 0;
 
        p = &chip->ports[port];
 
-       /* FIXME: is this the correct test? If we're in fixed mode on an
-        * internal port, why should we process this any different from
-        * PHY mode? On the other hand, the port may be automedia between
-        * an internal PHY and the serdes...
-        */
-       if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
-               return;
-
        mv88e6xxx_reg_lock(chip);
-       /* In inband mode, the link may come up at any time while the link
-        * is not forced down. Force the link down while we reconfigure the
-        * interface mode.
-        */
-       if (mode == MLO_AN_INBAND && p->interface != state->interface &&
-           chip->info->ops->port_set_link)
-               chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
 
-       err = mv88e6xxx_port_config_interface(chip, port, state->interface);
-       if (err && err != -EOPNOTSUPP)
-               goto err_unlock;
-
-       err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
-                                         state->advertising);
-       /* FIXME: we should restart negotiation if something changed - which
-        * is something we get if we convert to using phylinks PCS operations.
-        */
-       if (err > 0)
-               err = 0;
+       if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) {
+               /* In inband mode, the link may come up at any time while the
+                * link is not forced down. Force the link down while we
+                * reconfigure the interface mode.
+                */
+               if (mode == MLO_AN_INBAND &&
+                   p->interface != state->interface &&
+                   chip->info->ops->port_set_link)
+                       chip->info->ops->port_set_link(chip, port,
+                                                      LINK_FORCED_DOWN);
+
+               err = mv88e6xxx_port_config_interface(chip, port,
+                                                     state->interface);
+               if (err && err != -EOPNOTSUPP)
+                       goto err_unlock;
+
+               err = mv88e6xxx_serdes_pcs_config(chip, port, mode,
+                                                 state->interface,
+                                                 state->advertising);
+               /* FIXME: we should restart negotiation if something changed -
+                * which is something we get if we convert to using phylinks
+                * PCS operations.
+                */
+               if (err > 0)
+                       err = 0;
+       }
 
        /* Undo the forced down state above after completing configuration
-        * irrespective of its state on entry, which allows the link to come up.
+        * irrespective of its state on entry, which allows the link to come
+        * up in the in-band case where there is no separate SERDES. Also
+        * ensure that the link can come up if the PPU is in use and we are
+        * in PHY mode (we treat the PPU as an effective in-band mechanism.)
         */
-       if (mode == MLO_AN_INBAND && p->interface != state->interface &&
-           chip->info->ops->port_set_link)
+       if (chip->info->ops->port_set_link &&
+           ((mode == MLO_AN_INBAND && p->interface != state->interface) ||
+            (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
                chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
 
        p->interface = state->interface;
@@ -752,13 +762,16 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       /* Internal PHYs propagate their configuration directly to the MAC.
-        * External PHYs depend on whether the PPU is enabled for this port.
+       /* Force the link down if we know the port may not be automatically
+        * updated by the switch or if we are using fixed-link mode.
         */
-       if (((!mv88e6xxx_phy_is_internal(ds, port) &&
-             !mv88e6xxx_port_ppu_updates(chip, port)) ||
+       if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
             mode == MLO_AN_FIXED) && ops->port_sync_link)
                err = ops->port_sync_link(chip, port, mode, false);
+
+       if (!err && ops->port_set_speed_duplex)
+               err = ops->port_set_speed_duplex(chip, port, SPEED_UNFORCED,
+                                                DUPLEX_UNFORCED);
        mv88e6xxx_reg_unlock(chip);
 
        if (err)
@@ -779,11 +792,11 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       /* Internal PHYs propagate their configuration directly to the MAC.
-        * External PHYs depend on whether the PPU is enabled for this port.
+       /* Configure and force the link up if we know that the port may not
+        * automatically updated by the switch or if we are using fixed-link
+        * mode.
         */
-       if ((!mv88e6xxx_phy_is_internal(ds, port) &&
-            !mv88e6xxx_port_ppu_updates(chip, port)) ||
+       if (!mv88e6xxx_port_ppu_updates(chip, port) ||
            mode == MLO_AN_FIXED) {
                /* FIXME: for an automedia port, should we force the link
                 * down here - what if the link comes up due to "other" media
index d9817b2..ab41619 100644 (file)
@@ -283,7 +283,7 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
        if (err)
                return err;
 
-       if (speed)
+       if (speed != SPEED_UNFORCED)
                dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
        else
                dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
@@ -516,7 +516,7 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       if (speed)
+       if (speed != SPEED_UNFORCED)
                dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
        else
                dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
index 5527301..2b05ead 100644 (file)
@@ -830,7 +830,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
                           bool up)
 {
        u8 cmode = chip->ports[port].cmode;
-       int err = 0;
+       int err;
 
        switch (cmode) {
        case MV88E6XXX_PORT_STS_CMODE_SGMII:
@@ -842,6 +842,9 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
        case MV88E6XXX_PORT_STS_CMODE_RXAUI:
                err = mv88e6390_serdes_power_10g(chip, lane, up);
                break;
+       default:
+               err = -EINVAL;
+               break;
        }
 
        if (!err && up)
@@ -1541,6 +1544,9 @@ int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
        case MV88E6393X_PORT_STS_CMODE_10GBASER:
                err = mv88e6390_serdes_power_10g(chip, lane, on);
                break;
+       default:
+               err = -EINVAL;
+               break;
        }
 
        if (err)
index 327cc46..f1a05e7 100644 (file)
@@ -290,8 +290,11 @@ static int felix_setup_mmio_filtering(struct felix *felix)
                }
        }
 
-       if (cpu < 0)
+       if (cpu < 0) {
+               kfree(tagging_rule);
+               kfree(redirect_rule);
                return -EINVAL;
+       }
 
        tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
        *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
index d75d95a..993b2fb 100644 (file)
@@ -1430,16 +1430,19 @@ static int altera_tse_probe(struct platform_device *pdev)
                priv->rxdescmem_busaddr = dma_res->start;
 
        } else {
+               ret = -ENODEV;
                goto err_free_netdev;
        }
 
-       if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+       if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
                dma_set_coherent_mask(priv->device,
                                      DMA_BIT_MASK(priv->dmaops->dmamask));
-       else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+       } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
                dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
-       else
+       } else {
+               ret = -EIO;
                goto err_free_netdev;
+       }
 
        /* MAC address space */
        ret = request_and_map(pdev, "control_port", &control_port,
index 7d5d885..c72f0c7 100644 (file)
@@ -1288,26 +1288,22 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
 
 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
 {
-       struct ena_tx_buffer *tx_info = NULL;
+       struct ena_tx_buffer *tx_info;
 
-       if (likely(req_id < tx_ring->ring_size)) {
-               tx_info = &tx_ring->tx_buffer_info[req_id];
-               if (likely(tx_info->skb))
-                       return 0;
-       }
+       tx_info = &tx_ring->tx_buffer_info[req_id];
+       if (likely(tx_info->skb))
+               return 0;
 
        return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
 }
 
 static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
 {
-       struct ena_tx_buffer *tx_info = NULL;
+       struct ena_tx_buffer *tx_info;
 
-       if (likely(req_id < xdp_ring->ring_size)) {
-               tx_info = &xdp_ring->tx_buffer_info[req_id];
-               if (likely(tx_info->xdpf))
-                       return 0;
-       }
+       tx_info = &xdp_ring->tx_buffer_info[req_id];
+       if (likely(tx_info->xdpf))
+               return 0;
 
        return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
 }
@@ -1332,9 +1328,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
 
                rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
                                                &req_id);
-               if (rc)
+               if (rc) {
+                       if (unlikely(rc == -EINVAL))
+                               handle_invalid_req_id(tx_ring, req_id, NULL,
+                                                     false);
                        break;
+               }
 
+               /* validate that the request id points to a valid skb */
                rc = validate_tx_req_id(tx_ring, req_id);
                if (rc)
                        break;
@@ -1427,6 +1428,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
                                  u16 *next_to_clean)
 {
        struct ena_rx_buffer *rx_info;
+       struct ena_adapter *adapter;
        u16 len, req_id, buf = 0;
        struct sk_buff *skb;
        void *page_addr;
@@ -1439,8 +1441,14 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
        rx_info = &rx_ring->rx_buffer_info[req_id];
 
        if (unlikely(!rx_info->page)) {
-               netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
-                         "Page is NULL\n");
+               adapter = rx_ring->adapter;
+               netif_err(adapter, rx_err, rx_ring->netdev,
+                         "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
+               ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
+               adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+               /* Make sure reset reason is set before triggering the reset */
+               smp_mb__before_atomic();
+               set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
                return NULL;
        }
 
@@ -1896,9 +1904,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
 
                rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
                                                &req_id);
-               if (rc)
+               if (rc) {
+                       if (unlikely(rc == -EINVAL))
+                               handle_invalid_req_id(xdp_ring, req_id, NULL,
+                                                     true);
                        break;
+               }
 
+               /* validate that the request id points to a valid xdp_frame */
                rc = validate_xdp_req_id(xdp_ring, req_id);
                if (rc)
                        break;
@@ -4013,10 +4026,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
        max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
        /* 1 IRQ for mgmnt and 1 IRQs for each IO direction */
        max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
-       if (unlikely(!max_num_io_queues)) {
-               dev_err(&pdev->dev, "The device doesn't have io queues\n");
-               return -EFAULT;
-       }
 
        return max_num_io_queues;
 }
index 81b3756..77e76c9 100644 (file)
@@ -366,6 +366,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                if (!buff->is_eop) {
                        buff_ = buff;
                        do {
+                               if (buff_->next >= self->size) {
+                                       err = -EIO;
+                                       goto err_exit;
+                               }
                                next_ = buff_->next,
                                buff_ = &self->buff_ring[next_];
                                is_rsc_completed =
@@ -389,6 +393,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                            (buff->is_lro && buff->is_cso_err)) {
                                buff_ = buff;
                                do {
+                                       if (buff_->next >= self->size) {
+                                               err = -EIO;
+                                               goto err_exit;
+                                       }
                                        next_ = buff_->next,
                                        buff_ = &self->buff_ring[next_];
 
index 88d2ab7..4579ddf 100644 (file)
@@ -1913,15 +1913,12 @@ static int ag71xx_probe(struct platform_device *pdev)
        ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
        if (IS_ERR(ag->mac_reset)) {
                netif_err(ag, probe, ndev, "missing mac reset\n");
-               err = PTR_ERR(ag->mac_reset);
-               goto err_free;
+               return PTR_ERR(ag->mac_reset);
        }
 
        ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-       if (!ag->mac_base) {
-               err = -ENOMEM;
-               goto err_free;
-       }
+       if (!ag->mac_base)
+               return -ENOMEM;
 
        ndev->irq = platform_get_irq(pdev, 0);
        err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
@@ -1929,7 +1926,7 @@ static int ag71xx_probe(struct platform_device *pdev)
        if (err) {
                netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
                          ndev->irq);
-               goto err_free;
+               return err;
        }
 
        ndev->netdev_ops = &ag71xx_netdev_ops;
@@ -1957,10 +1954,8 @@ static int ag71xx_probe(struct platform_device *pdev)
        ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
                                            sizeof(struct ag71xx_desc),
                                            &ag->stop_desc_dma, GFP_KERNEL);
-       if (!ag->stop_desc) {
-               err = -ENOMEM;
-               goto err_free;
-       }
+       if (!ag->stop_desc)
+               return -ENOMEM;
 
        ag->stop_desc->data = 0;
        ag->stop_desc->ctrl = 0;
@@ -1975,7 +1970,7 @@ static int ag71xx_probe(struct platform_device *pdev)
        err = of_get_phy_mode(np, &ag->phy_if_mode);
        if (err) {
                netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
-               goto err_free;
+               return err;
        }
 
        netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
@@ -1983,7 +1978,7 @@ static int ag71xx_probe(struct platform_device *pdev)
        err = clk_prepare_enable(ag->clk_eth);
        if (err) {
                netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
-               goto err_free;
+               return err;
        }
 
        ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
@@ -2019,8 +2014,6 @@ err_mdio_remove:
        ag71xx_mdio_remove(ag);
 err_put_clk:
        clk_disable_unprepare(ag->clk_eth);
-err_free:
-       free_netdev(ndev);
        return err;
 }
 
index 7cc5213..b07cb9b 100644 (file)
@@ -708,7 +708,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
 
        enet->irq_tx = platform_get_irq_byname(pdev, "tx");
 
-       dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+       err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        err = bcm4908_enet_dma_alloc(enet);
        if (err)
index 40933bf..60dde29 100644 (file)
@@ -1309,11 +1309,11 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
        struct bcm_sysport_tx_ring *ring;
+       unsigned long flags, desc_flags;
        struct bcm_sysport_cb *cb;
        struct netdev_queue *txq;
        u32 len_status, addr_lo;
        unsigned int skb_len;
-       unsigned long flags;
        dma_addr_t mapping;
        u16 queue;
        int ret;
@@ -1373,8 +1373,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
        ring->desc_count--;
 
        /* Ports are latched, so write upper address first */
+       spin_lock_irqsave(&priv->desc_lock, desc_flags);
        tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
        tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
+       spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
 
        /* Check ring space and update SW control flow */
        if (ring->desc_count == 0)
@@ -2013,6 +2015,7 @@ static int bcm_sysport_open(struct net_device *dev)
        }
 
        /* Initialize both hardware and software ring */
+       spin_lock_init(&priv->desc_lock);
        for (i = 0; i < dev->num_tx_queues; i++) {
                ret = bcm_sysport_init_tx_ring(priv, i);
                if (ret) {
index 984f76e..16b73bb 100644 (file)
@@ -711,6 +711,7 @@ struct bcm_sysport_priv {
        int                     wol_irq;
 
        /* Transmit rings */
+       spinlock_t              desc_lock;
        struct bcm_sysport_tx_ring *tx_rings;
 
        /* Receive queue */
index 5f25964..c888dde 100644 (file)
@@ -589,9 +589,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
                 * Internal or external PHY with MDIO access
                 */
                phydev = phy_attach(priv->dev, phy_name, pd->phy_interface);
-               if (!phydev) {
+               if (IS_ERR(phydev)) {
                        dev_err(kdev, "failed to register PHY device\n");
-                       return -ENODEV;
+                       return PTR_ERR(phydev);
                }
        } else {
                /*
index aacf141..2faba07 100644 (file)
@@ -150,10 +150,10 @@ static void enic_set_affinity_hint(struct enic *enic)
                    !cpumask_available(enic->msix[i].affinity_mask) ||
                    cpumask_empty(enic->msix[i].affinity_mask))
                        continue;
-               err = irq_set_affinity_hint(enic->msix_entry[i].vector,
-                                           enic->msix[i].affinity_mask);
+               err = irq_update_affinity_hint(enic->msix_entry[i].vector,
+                                              enic->msix[i].affinity_mask);
                if (err)
-                       netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+                       netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
                                    err);
        }
 
@@ -173,7 +173,7 @@ static void enic_unset_affinity_hint(struct enic *enic)
        int i;
 
        for (i = 0; i < enic->intr_count; i++)
-               irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+               irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
 }
 
 static int enic_udp_tunnel_set_port(struct net_device *netdev,
index d51f24c..84b3ba9 100644 (file)
@@ -3491,7 +3491,7 @@ static int be_msix_register(struct be_adapter *adapter)
                if (status)
                        goto err_msix;
 
-               irq_set_affinity_hint(vec, eqo->affinity_mask);
+               irq_update_affinity_hint(vec, eqo->affinity_mask);
        }
 
        return 0;
@@ -3552,7 +3552,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
        /* MSIx */
        for_all_evt_queues(adapter, eqo, i) {
                vec = be_msix_vec_get(adapter, eqo);
-               irq_set_affinity_hint(vec, NULL);
+               irq_update_affinity_hint(vec, NULL);
                free_irq(vec, eqo);
        }
 
index 2085844..e54e70e 100644 (file)
@@ -388,6 +388,8 @@ struct dpaa2_eth_ch_stats {
        __u64 bytes_per_cdan;
 };
 
+#define DPAA2_ETH_CH_STATS     7
+
 /* Maximum number of queues associated with a DPNI */
 #define DPAA2_ETH_MAX_TCS              8
 #define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
index adb8ce5..3fdbf87 100644 (file)
@@ -278,7 +278,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
        /* Per-channel stats */
        for (k = 0; k < priv->num_channels; k++) {
                ch_stats = &priv->channel[k]->stats;
-               for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
+               for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
                        *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
        }
        i += j;
index 7b4961d..ed7301b 100644 (file)
@@ -377,6 +377,9 @@ struct bufdesc_ex {
 #define FEC_ENET_WAKEUP        ((uint)0x00020000)      /* Wakeup request */
 #define FEC_ENET_TXF   (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
 #define FEC_ENET_RXF   (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_RXF_GET(X)    (((X) == 0) ? FEC_ENET_RXF_0 :  \
+                               (((X) == 1) ? FEC_ENET_RXF_1 :  \
+                               FEC_ENET_RXF_2))
 #define FEC_ENET_TS_AVAIL       ((uint)0x00010000)
 #define FEC_ENET_TS_TIMER       ((uint)0x00008000)
 
index bc418b9..1b1f7f2 100644 (file)
@@ -1480,7 +1480,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                        break;
                pkt_received++;
 
-               writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+               writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
 
                /* Check for errors. */
                status ^= BD_ENET_RX_LAST;
index d9baac0..4c9d05c 100644 (file)
@@ -1805,7 +1805,7 @@ static int fman_port_probe(struct platform_device *of_dev)
        fman = dev_get_drvdata(&fm_pdev->dev);
        if (!fman) {
                err = -EINVAL;
-               goto return_err;
+               goto put_device;
        }
 
        err = of_property_read_u32(port_node, "cell-index", &val);
@@ -1813,7 +1813,7 @@ static int fman_port_probe(struct platform_device *of_dev)
                dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
                        __func__, port_node);
                err = -EINVAL;
-               goto return_err;
+               goto put_device;
        }
        port_id = (u8)val;
        port->dts_params.id = port_id;
@@ -1847,7 +1847,7 @@ static int fman_port_probe(struct platform_device *of_dev)
        }  else {
                dev_err(port->dev, "%s: Illegal port type\n", __func__);
                err = -EINVAL;
-               goto return_err;
+               goto put_device;
        }
 
        port->dts_params.type = port_type;
@@ -1861,7 +1861,7 @@ static int fman_port_probe(struct platform_device *of_dev)
                        dev_err(port->dev, "%s: incorrect qman-channel-id\n",
                                __func__);
                        err = -EINVAL;
-                       goto return_err;
+                       goto put_device;
                }
                port->dts_params.qman_channel_id = qman_channel_id;
        }
@@ -1871,7 +1871,7 @@ static int fman_port_probe(struct platform_device *of_dev)
                dev_err(port->dev, "%s: of_address_to_resource() failed\n",
                        __func__);
                err = -ENOMEM;
-               goto return_err;
+               goto put_device;
        }
 
        port->dts_params.fman = fman;
@@ -1896,6 +1896,8 @@ static int fman_port_probe(struct platform_device *of_dev)
 
        return 0;
 
+put_device:
+       put_device(&fm_pdev->dev);
 return_err:
        of_node_put(port_node);
 free_port:
index 0b68852..5b8b9bc 100644 (file)
@@ -47,7 +47,6 @@ struct tgec_mdio_controller {
 #define MDIO_CTL_READ          BIT(15)
 
 #define MDIO_DATA(x)           (x & 0xffff)
-#define MDIO_DATA_BSY          BIT(31)
 
 struct mdio_fsl_priv {
        struct  tgec_mdio_controller __iomem *mdio_base;
index 83ae56c..326b56b 100644 (file)
@@ -738,10 +738,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
         * is not set to GqiRda, choose the queue format in a priority order:
         * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
         */
-       if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
-               dev_info(&priv->pdev->dev,
-                        "Driver is running with GQI RDA queue format.\n");
-       } else if (dev_op_dqo_rda) {
+       if (dev_op_dqo_rda) {
                priv->queue_format = GVE_DQO_RDA_FORMAT;
                dev_info(&priv->pdev->dev,
                         "Driver is running with DQO RDA queue format.\n");
@@ -753,6 +750,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
                         "Driver is running with GQI RDA queue format.\n");
                supported_features_mask =
                        be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
+       } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI RDA queue format.\n");
        } else {
                priv->queue_format = GVE_GQI_QPL_FORMAT;
                if (dev_op_gqi_qpl)
index 88ca49c..d57508b 100644 (file)
@@ -68,6 +68,9 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
                set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
        } else {
                skb = napi_alloc_skb(napi, len);
+
+               if (unlikely(!skb))
+                       return NULL;
                set_protocol = true;
        }
        __skb_put(skb, len);
index 3f7a9a4..63f5abc 100644 (file)
@@ -839,6 +839,8 @@ struct hnae3_handle {
 
        u8 netdev_flags;
        struct dentry *hnae3_dbgfs;
+       /* protects concurrent contention between debugfs commands */
+       struct mutex dbgfs_lock;
 
        /* Network interface message level enabled bits */
        u32 msg_enable;
index 081295b..c381f8a 100644 (file)
@@ -1226,6 +1226,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
        if (ret)
                return ret;
 
+       mutex_lock(&handle->dbgfs_lock);
        save_buf = &hns3_dbg_cmd[index].buf;
 
        if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
@@ -1238,15 +1239,15 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
                read_buf = *save_buf;
        } else {
                read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
-               if (!read_buf)
-                       return -ENOMEM;
+               if (!read_buf) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
                /* save the buffer addr until the last read operation */
                *save_buf = read_buf;
-       }
 
-       /* get data ready for the first time to read */
-       if (!*ppos) {
+               /* get data ready for the first time to read */
                ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
                                        read_buf, hns3_dbg_cmd[index].buf_len);
                if (ret)
@@ -1255,8 +1256,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
 
        size = simple_read_from_buffer(buffer, count, ppos, read_buf,
                                       strlen(read_buf));
-       if (size > 0)
+       if (size > 0) {
+               mutex_unlock(&handle->dbgfs_lock);
                return size;
+       }
 
 out:
        /* free the buffer for the last read operation */
@@ -1265,6 +1268,7 @@ out:
                *save_buf = NULL;
        }
 
+       mutex_unlock(&handle->dbgfs_lock);
        return ret;
 }
 
@@ -1337,6 +1341,8 @@ int hns3_dbg_init(struct hnae3_handle *handle)
                        debugfs_create_dir(hns3_dbg_dentry[i].name,
                                           handle->hnae3_dbgfs);
 
+       mutex_init(&handle->dbgfs_lock);
+
        for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
                if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
                     ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1363,6 +1369,7 @@ int hns3_dbg_init(struct hnae3_handle *handle)
        return 0;
 
 out:
+       mutex_destroy(&handle->dbgfs_lock);
        debugfs_remove_recursive(handle->hnae3_dbgfs);
        handle->hnae3_dbgfs = NULL;
        return ret;
@@ -1378,6 +1385,7 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
                        hns3_dbg_cmd[i].buf = NULL;
                }
 
+       mutex_destroy(&handle->dbgfs_lock);
        debugfs_remove_recursive(handle->hnae3_dbgfs);
        handle->hnae3_dbgfs = NULL;
 }
index fdc66fa..c5ac6ec 100644 (file)
@@ -114,7 +114,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
 
        memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
 
-       trace_hclge_vf_mbx_send(hdev, req);
+       if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
+               trace_hclge_vf_mbx_send(hdev, req);
 
        /* synchronous send */
        if (need_resp) {
index fed3b6b..b33ed4d 100644 (file)
@@ -548,7 +548,7 @@ static int rx_request_irq(struct hinic_rxq *rxq)
                goto err_req_irq;
 
        cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
-       err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+       err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
        if (err)
                goto err_irq_affinity;
 
@@ -565,7 +565,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
 {
        struct hinic_rq *rq = rxq->rq;
 
-       irq_set_affinity_hint(rq->irq, NULL);
+       irq_update_affinity_hint(rq->irq, NULL);
        free_irq(rq->irq, rxq);
        rx_del_napi(rxq);
 }
index a78c398..01e7d3c 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
+#include <linux/module.h>
 
 #include "hinic_hw_dev.h"
 #include "hinic_dev.h"
index 291e61a..2c1b1da 100644 (file)
@@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
                return;
        }
+       if (vsi->type != I40E_VSI_MAIN &&
+           vsi->type != I40E_VSI_FDIR &&
+           vsi->type != I40E_VSI_VMDQ2) {
+               dev_info(&pf->pdev->dev,
+                        "vsi %d type %d descriptor rings not available\n",
+                        vsi_seid, vsi->type);
+               return;
+       }
        if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
                dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
                return;
index e118cf9..2a3d8ae 100644 (file)
@@ -99,6 +99,24 @@ MODULE_LICENSE("GPL v2");
 
 static struct workqueue_struct *i40e_wq;
 
+static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+                                 struct net_device *netdev, int delta)
+{
+       struct netdev_hw_addr *ha;
+
+       if (!f || !netdev)
+               return;
+
+       netdev_for_each_mc_addr(ha, netdev) {
+               if (ether_addr_equal(ha->addr, f->macaddr)) {
+                       ha->refcount += delta;
+                       if (ha->refcount <= 0)
+                               ha->refcount = 1;
+                       break;
+               }
+       }
+}
+
 /**
  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw:   pointer to the HW structure
@@ -2036,6 +2054,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
        hlist_for_each_entry_safe(new, h, from, hlist) {
                /* We can simply free the wrapper structure */
                hlist_del(&new->hlist);
+               netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
                kfree(new);
        }
 }
@@ -2383,6 +2402,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                                                       &tmp_add_list,
                                                       &tmp_del_list,
                                                       vlan_filters);
+
+               hlist_for_each_entry(new, &tmp_add_list, hlist)
+                       netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
+
                if (retval)
                        goto err_no_memory_locked;
 
@@ -2515,6 +2538,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                        if (new->f->state == I40E_FILTER_NEW)
                                new->f->state = new->state;
                        hlist_del(&new->hlist);
+                       netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
                        kfree(new);
                }
                spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -3891,10 +3915,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
                 *
                 * get_cpu_mask returns a static constant mask with
                 * a permanent lifetime so it's ok to pass to
-                * irq_set_affinity_hint without making a copy.
+                * irq_update_affinity_hint without making a copy.
                 */
                cpu = cpumask_local_spread(q_vector->v_idx, -1);
-               irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+               irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
        }
 
        vsi->irqs_ready = true;
@@ -3905,7 +3929,7 @@ free_queue_irqs:
                vector--;
                irq_num = pf->msix_entries[base + vector].vector;
                irq_set_affinity_notifier(irq_num, NULL);
-               irq_set_affinity_hint(irq_num, NULL);
+               irq_update_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &vsi->q_vectors[vector]);
        }
        return err;
@@ -4726,7 +4750,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
                        /* clear the affinity notifier in the IRQ descriptor */
                        irq_set_affinity_notifier(irq_num, NULL);
                        /* remove our suggested affinity mask for this IRQ */
-                       irq_set_affinity_hint(irq_num, NULL);
+                       irq_update_affinity_hint(irq_num, NULL);
                        synchronize_irq(irq_num);
                        free_irq(irq_num, vsi->q_vectors[i]);
 
@@ -8716,6 +8740,27 @@ int i40e_open(struct net_device *netdev)
        return 0;
 }
 
+/**
+ * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
+ * @vsi: vsi structure
+ *
+ * This updates netdev's number of tx/rx queues
+ *
+ * Returns status of setting tx/rx queues
+ **/
+static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
+{
+       int ret;
+
+       ret = netif_set_real_num_rx_queues(vsi->netdev,
+                                          vsi->num_queue_pairs);
+       if (ret)
+               return ret;
+
+       return netif_set_real_num_tx_queues(vsi->netdev,
+                                           vsi->num_queue_pairs);
+}
+
 /**
  * i40e_vsi_open -
  * @vsi: the VSI to open
@@ -8752,13 +8797,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
                        goto err_setup_rx;
 
                /* Notify the stack of the actual queue counts. */
-               err = netif_set_real_num_tx_queues(vsi->netdev,
-                                                  vsi->num_queue_pairs);
-               if (err)
-                       goto err_set_queues;
-
-               err = netif_set_real_num_rx_queues(vsi->netdev,
-                                                  vsi->num_queue_pairs);
+               err = i40e_netif_set_realnum_tx_rx_queues(vsi);
                if (err)
                        goto err_set_queues;
 
@@ -14149,6 +14188,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
        case I40E_VSI_MAIN:
        case I40E_VSI_VMDQ2:
                ret = i40e_config_netdev(vsi);
+               if (ret)
+                       goto err_netdev;
+               ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
                if (ret)
                        goto err_netdev;
                ret = register_netdev(vsi->netdev);
@@ -15451,8 +15493,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
-               dev_info(&pdev->dev,
-                        "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+               dev_dbg(&pdev->dev,
+                       "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
                         hw->aq.api_maj_ver,
                         hw->aq.api_min_ver,
                         I40E_FW_API_VERSION_MAJOR,
index 80ae264..048f167 100644 (file)
@@ -1877,17 +1877,19 @@ sriov_configure_out:
 /***********************virtual channel routines******************/
 
 /**
- * i40e_vc_send_msg_to_vf
+ * i40e_vc_send_msg_to_vf_ex
  * @vf: pointer to the VF info
  * @v_opcode: virtual channel opcode
  * @v_retval: virtual channel return value
  * @msg: pointer to the msg buffer
  * @msglen: msg length
+ * @is_quiet: true for not printing unsuccessful return values, false otherwise
  *
  * send msg to VF
  **/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
-                                 u32 v_retval, u8 *msg, u16 msglen)
+static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
+                                    u32 v_retval, u8 *msg, u16 msglen,
+                                    bool is_quiet)
 {
        struct i40e_pf *pf;
        struct i40e_hw *hw;
@@ -1903,7 +1905,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 
        /* single place to detect unsuccessful return values */
-       if (v_retval) {
+       if (v_retval && !is_quiet) {
                vf->num_invalid_msgs++;
                dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
                         vf->vf_id, v_opcode, v_retval);
@@ -1933,6 +1935,23 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
        return 0;
 }
 
+/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+                                 u32 v_retval, u8 *msg, u16 msglen)
+{
+       return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
+                                        msg, msglen, false);
+}
+
 /**
  * i40e_vc_send_resp_to_vf
  * @vf: pointer to the VF info
@@ -1948,6 +1967,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
        return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
 }
 
+/**
+ * i40e_sync_vf_state
+ * @vf: pointer to the VF info
+ * @state: VF state
+ *
+ * Called from a VF message to synchronize the service with a potential
+ * VF reset state
+ **/
+static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
+{
+       int i;
+
+       /* When handling some messages, it needs VF state to be set.
+        * It is possible that this flag is cleared during VF reset,
+        * so there is a need to wait until the end of the reset to
+        * handle the request message correctly.
+        */
+       for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
+               if (test_bit(state, &vf->vf_states))
+                       return true;
+               usleep_range(10000, 20000);
+       }
+
+       return test_bit(state, &vf->vf_states);
+}
+
 /**
  * i40e_vc_get_version_msg
  * @vf: pointer to the VF info
@@ -2008,7 +2053,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        size_t len = 0;
        int ret;
 
-       if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -2131,7 +2176,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
        bool allmulti = false;
        bool alluni = false;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err_out;
        }
@@ -2219,7 +2264,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
        struct i40e_vsi *vsi;
        u16 num_qps_all = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2368,7 +2413,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2540,7 +2585,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
        struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2590,7 +2635,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
        u8 cur_pairs = vf->num_queue_pairs;
        struct i40e_pf *pf = vf->pf;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
                return -EINVAL;
 
        if (req_pairs > I40E_MAX_VF_QUEUES) {
@@ -2635,7 +2680,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
 
        memset(&stats, 0, sizeof(struct i40e_eth_stats));
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2669,6 +2714,7 @@ error_param:
  * i40e_check_vf_permission
  * @vf: pointer to the VF info
  * @al: MAC address list from virtchnl
+ * @is_quiet: set true for printing msg without opcode info, false otherwise
  *
  * Check that the given list of MAC addresses is allowed. Will return -EPERM
  * if any address in the list is not valid. Checks the following conditions:
@@ -2683,13 +2729,15 @@ error_param:
  * addresses might not be accurate.
  **/
 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
-                                          struct virtchnl_ether_addr_list *al)
+                                          struct virtchnl_ether_addr_list *al,
+                                          bool *is_quiet)
 {
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
        int mac2add_cnt = 0;
        int i;
 
+       *is_quiet = false;
        for (i = 0; i < al->num_elements; i++) {
                struct i40e_mac_filter *f;
                u8 *addr = al->list[i].addr;
@@ -2713,6 +2761,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
                    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
                        dev_err(&pf->pdev->dev,
                                "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+                       *is_quiet = true;
                        return -EPERM;
                }
 
@@ -2749,10 +2798,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
            (struct virtchnl_ether_addr_list *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = NULL;
+       bool is_quiet = false;
        i40e_status ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
@@ -2765,7 +2815,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
         */
        spin_lock_bh(&vsi->mac_filter_hash_lock);
 
-       ret = i40e_check_vf_permission(vf, al);
+       ret = i40e_check_vf_permission(vf, al, &is_quiet);
        if (ret) {
                spin_unlock_bh(&vsi->mac_filter_hash_lock);
                goto error_param;
@@ -2803,8 +2853,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 
 error_param:
        /* send the response to the VF */
-       return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
-                                      ret);
+       return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+                                      ret, NULL, 0, is_quiet);
 }
 
 /**
@@ -2824,7 +2874,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
@@ -2968,7 +3018,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -3088,9 +3138,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
        struct i40e_vsi *vsi = NULL;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
-           (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+           vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3119,9 +3169,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        u16 i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
-           (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+           vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3154,7 +3204,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int len = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3190,7 +3240,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
        struct i40e_hw *hw = &pf->hw;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3215,7 +3265,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        struct i40e_vsi *vsi;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3241,7 +3291,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        struct i40e_vsi *vsi;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3468,7 +3518,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i, ret;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3599,7 +3649,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i, ret;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err_out;
        }
@@ -3708,7 +3758,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        u64 speed = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3797,11 +3847,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 
        /* set this flag only after making sure all inputs are sane */
        vf->adq_enabled = true;
-       /* num_req_queues is set when user changes number of queues via ethtool
-        * and this causes issue for default VSI(which depends on this variable)
-        * when ADq is enabled, hence reset it.
-        */
-       vf->num_req_queues = 0;
 
        /* reset the VF in order to allocate resources */
        i40e_vc_reset_vf(vf, true);
@@ -3824,7 +3869,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
        struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
index 091e32c..49575a6 100644 (file)
@@ -18,6 +18,8 @@
 
 #define I40E_MAX_VF_PROMISC_FLAGS      3
 
+#define I40E_VF_STATE_WAIT_COUNT       20
+
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
        I40E_QUEUE_CTRL_UNKNOWN = 0,
index 0cecaff..461f523 100644 (file)
@@ -615,23 +615,44 @@ static int iavf_set_ringparam(struct net_device *netdev,
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
 
-       new_tx_count = clamp_t(u32, ring->tx_pending,
-                              IAVF_MIN_TXD,
-                              IAVF_MAX_TXD);
-       new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       if (ring->tx_pending > IAVF_MAX_TXD ||
+           ring->tx_pending < IAVF_MIN_TXD ||
+           ring->rx_pending > IAVF_MAX_RXD ||
+           ring->rx_pending < IAVF_MIN_RXD) {
+               netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
+                          ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
+                          IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+               return -EINVAL;
+       }
 
-       new_rx_count = clamp_t(u32, ring->rx_pending,
-                              IAVF_MIN_RXD,
-                              IAVF_MAX_RXD);
-       new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       if (new_tx_count != ring->tx_pending)
+               netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
+                           new_tx_count);
+
+       new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       if (new_rx_count != ring->rx_pending)
+               netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
+                           new_rx_count);
 
        /* if nothing to do return success */
        if ((new_tx_count == adapter->tx_desc_count) &&
-           (new_rx_count == adapter->rx_desc_count))
+           (new_rx_count == adapter->rx_desc_count)) {
+               netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
                return 0;
+       }
 
-       adapter->tx_desc_count = new_tx_count;
-       adapter->rx_desc_count = new_rx_count;
+       if (new_tx_count != adapter->tx_desc_count) {
+               netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
+                          adapter->tx_desc_count, new_tx_count);
+               adapter->tx_desc_count = new_tx_count;
+       }
+
+       if (new_rx_count != adapter->rx_desc_count) {
+               netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
+                          adapter->rx_desc_count, new_rx_count);
+               adapter->rx_desc_count = new_rx_count;
+       }
 
        if (netif_running(netdev)) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
index 14934a7..b1c161f 100644 (file)
@@ -492,10 +492,10 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
                irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
                /* Spread the IRQ affinity hints across online CPUs. Note that
                 * get_cpu_mask returns a mask with a permanent lifetime so
-                * it's safe to use as a hint for irq_set_affinity_hint.
+                * it's safe to use as a hint for irq_update_affinity_hint.
                 */
                cpu = cpumask_local_spread(q_vector->v_idx, -1);
-               irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+               irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
        }
 
        return 0;
@@ -505,7 +505,7 @@ free_queue_irqs:
                vector--;
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
-               irq_set_affinity_hint(irq_num, NULL);
+               irq_update_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
        return err;
@@ -557,7 +557,7 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
        for (vector = 0; vector < q_vectors; vector++) {
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
-               irq_set_affinity_hint(irq_num, NULL);
+               irq_update_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
 }
@@ -2046,6 +2046,7 @@ static void iavf_watchdog_task(struct work_struct *work)
                }
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+               mutex_unlock(&adapter->crit_lock);
                queue_delayed_work(iavf_wq,
                                   &adapter->watchdog_task,
                                   msecs_to_jiffies(10));
@@ -2076,16 +2077,14 @@ static void iavf_watchdog_task(struct work_struct *work)
                        iavf_detect_recover_hung(&adapter->vsi);
                break;
        case __IAVF_REMOVE:
-               mutex_unlock(&adapter->crit_lock);
-               return;
        default:
+               mutex_unlock(&adapter->crit_lock);
                return;
        }
 
        /* check for hw reset */
        reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
        if (!reg_val) {
-               iavf_change_state(adapter, __IAVF_RESETTING);
                adapter->flags |= IAVF_FLAG_RESET_PENDING;
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -2248,6 +2247,7 @@ static void iavf_reset_task(struct work_struct *work)
        }
 
        pci_set_master(adapter->pdev);
+       pci_restore_msi_state(adapter->pdev);
 
        if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
                dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
@@ -2708,8 +2708,11 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
                total_max_rate += tx_rate;
                num_qps += mqprio_qopt->qopt.count[i];
        }
-       if (num_qps > IAVF_MAX_REQ_QUEUES)
+       if (num_qps > adapter->num_active_queues) {
+               dev_err(&adapter->pdev->dev,
+                       "Cannot support requested number of queues\n");
                return -EINVAL;
+       }
 
        ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
        return ret;
index 1efc635..fafe020 100644 (file)
@@ -6,6 +6,18 @@
 #include "ice_lib.h"
 #include "ice_dcb_lib.h"
 
+static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
+{
+       rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
+       return !!rx_ring->xdp_buf;
+}
+
+static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
+{
+       rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
+       return !!rx_ring->rx_buf;
+}
+
 /**
  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
  * @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -492,8 +504,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                        xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
                                         ring->q_index, ring->q_vector->napi.napi_id);
 
+               kfree(ring->rx_buf);
                ring->xsk_pool = ice_xsk_pool(ring);
                if (ring->xsk_pool) {
+                       if (!ice_alloc_rx_buf_zc(ring))
+                               return -ENOMEM;
                        xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
                        ring->rx_buf_len =
@@ -508,6 +523,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                        dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                                 ring->q_index);
                } else {
+                       if (!ice_alloc_rx_buf(ring))
+                               return -ENOMEM;
                        if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
                                /* coverity[check_return] */
                                xdp_rxq_info_reg(&ring->xdp_rxq,
index 7fdeb41..3eb0173 100644 (file)
@@ -97,6 +97,9 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
 
        new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
 
+       if (!bwcfg)
+               new_cfg->etscfg.tcbwtable[0] = 100;
+
        if (!bwrec)
                new_cfg->etsrec.tcbwtable[0] = 100;
 
@@ -167,15 +170,18 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
        if (mode == pf->dcbx_cap)
                return ICE_DCB_NO_HW_CHG;
 
-       pf->dcbx_cap = mode;
        qos_cfg = &pf->hw.port_info->qos_cfg;
-       if (mode & DCB_CAP_DCBX_VER_CEE) {
-               if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
-                       return ICE_DCB_NO_HW_CHG;
+
+       /* DSCP configuration is not DCBx negotiated */
+       if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+               return ICE_DCB_NO_HW_CHG;
+
+       pf->dcbx_cap = mode;
+
+       if (mode & DCB_CAP_DCBX_VER_CEE)
                qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
-       } else {
+       else
                qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
-       }
 
        dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
        return ICE_DCB_HW_CHG_RST;
index 38960bc..b6e7f47 100644 (file)
@@ -1268,7 +1268,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
                bool is_tun = tun == ICE_FD_HW_SEG_TUN;
                int err;
 
-               if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
+               if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
                        continue;
                err = ice_fdir_write_fltr(pf, input, add, is_tun);
                if (err)
@@ -1652,7 +1652,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
        }
 
        /* return error if not an update and no available filters */
-       fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
+       fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
        if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
            ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
                dev_err(dev, "Failed to add filter.  The maximum number of flow director filters has been reached.\n");
index cbd8424..4dca009 100644 (file)
@@ -924,7 +924,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
                memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
                loc = pkt;
        } else {
-               if (!ice_get_open_tunnel_port(hw, &tnl_port))
+               if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
                        return ICE_ERR_DOES_NOT_EXIST;
                if (!ice_fdir_pkt[idx].tun_pkt)
                        return ICE_ERR_PARAM;
index 23cfcce..6ad1c25 100644 (file)
@@ -1899,9 +1899,11 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
  * ice_get_open_tunnel_port - retrieve an open tunnel port
  * @hw: pointer to the HW structure
  * @port: returns open port
+ * @type: type of tunnel, can be TNL_LAST if it doesn't matter
  */
 bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+                        enum ice_tunnel_type type)
 {
        bool res = false;
        u16 i;
@@ -1909,7 +1911,8 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
        mutex_lock(&hw->tnl_lock);
 
        for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
-               if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
+               if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
+                   (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
                        *port = hw->tnl.tbl[i].port;
                        res = true;
                        break;
index 344c263..a2863f3 100644 (file)
@@ -33,7 +33,8 @@ enum ice_status
 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
                   unsigned long *bm, struct list_head *fv_list);
 bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+                        enum ice_tunnel_type type);
 int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
                            unsigned int idx, struct udp_tunnel_info *ti);
 int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
index 4d1fc48..73c61cd 100644 (file)
@@ -5881,6 +5881,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
                netif_carrier_on(vsi->netdev);
        }
 
+       /* clear this now, and the first stats read will be used as baseline */
+       vsi->stat_offsets_loaded = false;
+
        ice_service_task_schedule(pf);
 
        return 0;
@@ -5927,14 +5930,15 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats st
 /**
  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
  * @vsi: the VSI to be updated
+ * @vsi_stats: the stats struct to be updated
  * @rings: rings to work on
  * @count: number of rings
  */
 static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
-                            u16 count)
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
+                            struct rtnl_link_stats64 *vsi_stats,
+                            struct ice_tx_ring **rings, u16 count)
 {
-       struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
        u16 i;
 
        for (i = 0; i < count; i++) {
@@ -5958,15 +5962,13 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
  */
 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 {
-       struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+       struct rtnl_link_stats64 *vsi_stats;
        u64 pkts, bytes;
        int i;
 
-       /* reset netdev stats */
-       vsi_stats->tx_packets = 0;
-       vsi_stats->tx_bytes = 0;
-       vsi_stats->rx_packets = 0;
-       vsi_stats->rx_bytes = 0;
+       vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
+       if (!vsi_stats)
+               return;
 
        /* reset non-netdev (extended) stats */
        vsi->tx_restart = 0;
@@ -5978,7 +5980,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
        rcu_read_lock();
 
        /* update Tx rings counters */
-       ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
+       ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
+                                    vsi->num_txq);
 
        /* update Rx rings counters */
        ice_for_each_rxq(vsi, i) {
@@ -5993,10 +5996,17 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 
        /* update XDP Tx rings counters */
        if (ice_is_xdp_ena_vsi(vsi))
-               ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+               ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
                                             vsi->num_xdp_txq);
 
        rcu_read_unlock();
+
+       vsi->net_stats.tx_packets = vsi_stats->tx_packets;
+       vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
+       vsi->net_stats.rx_packets = vsi_stats->rx_packets;
+       vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
+
+       kfree(vsi_stats);
 }
 
 /**
index bf7247c..442b031 100644 (file)
@@ -705,7 +705,7 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
                scaled_ppm = -scaled_ppm;
        }
 
-       while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
+       while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
                /* handle overflow by scaling down the scaled_ppm and
                 * the divisor, losing some precision
                 */
@@ -1540,19 +1540,16 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
                if (err)
                        continue;
 
-               /* Check if the timestamp is valid */
-               if (!(raw_tstamp & ICE_PTP_TS_VALID))
+               /* Check if the timestamp is invalid or stale */
+               if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+                   raw_tstamp == tx->tstamps[idx].cached_tstamp)
                        continue;
 
-               /* clear the timestamp register, so that it won't show valid
-                * again when re-used.
-                */
-               ice_clear_phy_tstamp(hw, tx->quad, phy_idx);
-
                /* The timestamp is valid, so we'll go ahead and clear this
                 * index and then send the timestamp up to the stack.
                 */
                spin_lock(&tx->lock);
+               tx->tstamps[idx].cached_tstamp = raw_tstamp;
                clear_bit(idx, tx->in_use);
                skb = tx->tstamps[idx].skb;
                tx->tstamps[idx].skb = NULL;
index f71ad31..53c15fc 100644 (file)
@@ -55,15 +55,21 @@ struct ice_perout_channel {
  * struct ice_tx_tstamp - Tracking for a single Tx timestamp
  * @skb: pointer to the SKB for this timestamp request
  * @start: jiffies when the timestamp was first requested
+ * @cached_tstamp: last read timestamp
  *
  * This structure tracks a single timestamp request. The SKB pointer is
  * provided when initiating a request. The start time is used to ensure that
  * we discard old requests that were not fulfilled within a 2 second time
  * window.
+ * Timestamp values in the PHY are read only and do not get cleared except at
+ * hardware reset or when a new timestamp value is captured. The cached_tstamp
+ * field is used to detect the case where a new timestamp has not yet been
+ * captured, ensuring that we avoid sending stale timestamp data to the stack.
  */
 struct ice_tx_tstamp {
        struct sk_buff *skb;
        unsigned long start;
+       u64 cached_tstamp;
 };
 
 /**
index 793f4a9..183d930 100644 (file)
@@ -3796,10 +3796,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
  * ice_find_recp - find a recipe
  * @hw: pointer to the hardware structure
  * @lkup_exts: extension sequence to match
+ * @tun_type: type of recipe tunnel
  *
  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
  */
-static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
+static u16
+ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
+             enum ice_sw_tunnel_type tun_type)
 {
        bool refresh_required = true;
        struct ice_sw_recipe *recp;
@@ -3860,8 +3863,9 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
                        }
                        /* If for "i"th recipe the found was never set to false
                         * then it means we found our match
+                        * Also tun type of recipe needs to be checked
                         */
-                       if (found)
+                       if (found && recp[i].tun_type == tun_type)
                                return i; /* Return the recipe ID */
                }
        }
@@ -4651,11 +4655,12 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
        }
 
        /* Look for a recipe which matches our requested fv / mask list */
-       *rid = ice_find_recp(hw, lkup_exts);
+       *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
        if (*rid < ICE_MAX_NUM_RECIPES)
                /* Success if found a recipe that match the existing criteria */
                goto err_unroll;
 
+       rm->tun_type = rinfo->tun_type;
        /* Recipe we need does not exist, add a recipe */
        status = ice_add_sw_recipe(hw, rm, profiles);
        if (status)
@@ -4958,11 +4963,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
 
        switch (tun_type) {
        case ICE_SW_TUN_VXLAN:
+               if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
+                       return ICE_ERR_CFG;
+               break;
        case ICE_SW_TUN_GENEVE:
-               if (!ice_get_open_tunnel_port(hw, &open_port))
+               if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
                        return ICE_ERR_CFG;
                break;
-
        default:
                /* Nothing needs to be done for this tunnel type */
                return 0;
@@ -5555,7 +5562,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
        if (status)
                return status;
 
-       rid = ice_find_recp(hw, &lkup_exts);
+       rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
        /* If did not find a recipe that match the existing criteria */
        if (rid == ICE_MAX_NUM_RECIPES)
                return ICE_ERR_PARAM;
index e5d23fe..25cca5c 100644 (file)
@@ -74,21 +74,13 @@ static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
        return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
 }
 
-static enum ice_protocol_type
-ice_proto_type_from_l4_port(bool inner, u16 ip_proto)
+static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
 {
-       if (inner) {
-               switch (ip_proto) {
-               case IPPROTO_UDP:
-                       return ICE_UDP_ILOS;
-               }
-       } else {
-               switch (ip_proto) {
-               case IPPROTO_TCP:
-                       return ICE_TCP_IL;
-               case IPPROTO_UDP:
-                       return ICE_UDP_OF;
-               }
+       switch (ip_proto) {
+       case IPPROTO_TCP:
+               return ICE_TCP_IL;
+       case IPPROTO_UDP:
+               return ICE_UDP_ILOS;
        }
 
        return 0;
@@ -191,8 +183,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
                i++;
        }
 
-       if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) {
-               list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto);
+       if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
+           hdr->l3_key.ip_proto == IPPROTO_UDP) {
+               list[i].type = ICE_UDP_OF;
                list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
                list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
                i++;
@@ -317,7 +310,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
                     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
                struct ice_tc_l4_hdr *l4_key, *l4_mask;
 
-               list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto);
+               list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
                l4_key = &headers->l4_key;
                l4_mask = &headers->l4_mask;
 
@@ -802,7 +795,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
                headers->l3_mask.ttl = match.mask->ttl;
        }
 
-       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
+           fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
                struct flow_match_ports match;
 
                flow_rule_match_enc_ports(rule, &match);
index bc3ba19..dccf09e 100644 (file)
@@ -419,7 +419,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
        }
 
 rx_skip_free:
-       memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+       if (rx_ring->xsk_pool)
+               memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+       else
+               memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
 
        /* Zero out the descriptor ring */
        size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -446,8 +449,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
                if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
                        xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
        rx_ring->xdp_prog = NULL;
-       devm_kfree(rx_ring->dev, rx_ring->rx_buf);
-       rx_ring->rx_buf = NULL;
+       if (rx_ring->xsk_pool) {
+               kfree(rx_ring->xdp_buf);
+               rx_ring->xdp_buf = NULL;
+       } else {
+               kfree(rx_ring->rx_buf);
+               rx_ring->rx_buf = NULL;
+       }
 
        if (rx_ring->desc) {
                size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -475,8 +483,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
        /* warn if we are about to overwrite the pointer */
        WARN_ON(rx_ring->rx_buf);
        rx_ring->rx_buf =
-               devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
-                            GFP_KERNEL);
+               kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
        if (!rx_ring->rx_buf)
                return -ENOMEM;
 
@@ -505,7 +512,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
        return 0;
 
 err:
-       devm_kfree(dev, rx_ring->rx_buf);
+       kfree(rx_ring->rx_buf);
        rx_ring->rx_buf = NULL;
        return -ENOMEM;
 }
index c56dd17..b7b3bd4 100644 (file)
@@ -24,7 +24,6 @@
 #define ICE_MAX_DATA_PER_TXD_ALIGNED \
        (~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
 
-#define ICE_RX_BUF_WRITE       16      /* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG   128
 
 /* Attempt to maximize the headroom available for incoming frames. We use a 2K
index 217ff5e..6427e7e 100644 (file)
@@ -1617,6 +1617,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
                ice_vc_set_default_allowlist(vf);
 
                ice_vf_fdir_exit(vf);
+               ice_vf_fdir_init(vf);
                /* clean VF control VSI when resetting VFs since it should be
                 * setup only when VF creates its first FDIR rule.
                 */
@@ -1747,6 +1748,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
        }
 
        ice_vf_fdir_exit(vf);
+       ice_vf_fdir_init(vf);
        /* clean VF control VSI when resetting VF since it should be setup
         * only when VF creates its first FDIR rule.
         */
@@ -2021,6 +2023,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
        if (ret)
                goto err_unroll_sriov;
 
+       /* rearm global interrupts */
+       if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+               ice_irq_dynamic_ena(hw, NULL, NULL);
+
        return 0;
 
 err_unroll_sriov:
index bb9a808..c895351 100644 (file)
 #include "ice_txrx_lib.h"
 #include "ice_lib.h"
 
+static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
+{
+       return &rx_ring->xdp_buf[idx];
+}
+
 /**
  * ice_qp_reset_stats - Resets all stats for rings of given index
  * @vsi: VSI that contains rings of interest
@@ -372,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        dma_addr_t dma;
 
        rx_desc = ICE_RX_DESC(rx_ring, ntu);
-       xdp = &rx_ring->xdp_buf[ntu];
+       xdp = ice_xdp_buf(rx_ring, ntu);
 
        nb_buffs = min_t(u16, count, rx_ring->count - ntu);
        nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
@@ -390,14 +395,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        }
 
        ntu += nb_buffs;
-       if (ntu == rx_ring->count) {
-               rx_desc = ICE_RX_DESC(rx_ring, 0);
-               xdp = rx_ring->xdp_buf;
+       if (ntu == rx_ring->count)
                ntu = 0;
-       }
 
-       /* clear the status bits for the next_to_use descriptor */
-       rx_desc->wb.status_error0 = 0;
        ice_release_rx_desc(rx_ring, ntu);
 
        return count == nb_buffs;
@@ -419,19 +419,18 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
 /**
  * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
  * @rx_ring: Rx ring
- * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
+ * @xdp: Pointer to XDP buffer
  *
  * This function allocates a new skb from a zero-copy Rx buffer.
  *
  * Returns the skb on success, NULL on failure.
  */
 static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 {
-       struct xdp_buff *xdp = *xdp_arr;
+       unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
        unsigned int metasize = xdp->data - xdp->data_meta;
        unsigned int datasize = xdp->data_end - xdp->data;
-       unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
        struct sk_buff *skb;
 
        skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -445,7 +444,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
                skb_metadata_set(skb, metasize);
 
        xsk_buff_free(xdp);
-       *xdp_arr = NULL;
        return skb;
 }
 
@@ -507,7 +505,6 @@ out_failure:
 int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-       u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
        struct ice_tx_ring *xdp_ring;
        unsigned int xdp_xmit = 0;
        struct bpf_prog *xdp_prog;
@@ -522,7 +519,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
        while (likely(total_rx_packets < (unsigned int)budget)) {
                union ice_32b_rx_flex_desc *rx_desc;
                unsigned int size, xdp_res = 0;
-               struct xdp_buff **xdp;
+               struct xdp_buff *xdp;
                struct sk_buff *skb;
                u16 stat_err_bits;
                u16 vlan_tag = 0;
@@ -540,31 +537,35 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                 */
                dma_rmb();
 
+               xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+
                size = le16_to_cpu(rx_desc->wb.pkt_len) &
                                   ICE_RX_FLX_DESC_PKT_LEN_M;
-               if (!size)
-                       break;
+               if (!size) {
+                       xdp->data = NULL;
+                       xdp->data_end = NULL;
+                       xdp->data_hard_start = NULL;
+                       xdp->data_meta = NULL;
+                       goto construct_skb;
+               }
 
-               xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
-               xsk_buff_set_size(*xdp, size);
-               xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
+               xsk_buff_set_size(xdp, size);
+               xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
 
-               xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
+               xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
                if (xdp_res) {
                        if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
                                xdp_xmit |= xdp_res;
                        else
-                               xsk_buff_free(*xdp);
+                               xsk_buff_free(xdp);
 
-                       *xdp = NULL;
                        total_rx_bytes += size;
                        total_rx_packets++;
-                       cleaned_count++;
 
                        ice_bump_ntc(rx_ring);
                        continue;
                }
-
+construct_skb:
                /* XDP_PASS path */
                skb = ice_construct_skb_zc(rx_ring, xdp);
                if (!skb) {
@@ -572,7 +573,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                        break;
                }
 
-               cleaned_count++;
                ice_bump_ntc(rx_ring);
 
                if (eth_skb_pad(skb)) {
@@ -594,8 +594,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                ice_receive_skb(rx_ring, skb, vlan_tag);
        }
 
-       if (cleaned_count >= ICE_RX_BUF_WRITE)
-               failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+       failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
 
        ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
        ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -811,15 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
  */
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
 {
-       u16 i;
-
-       for (i = 0; i < rx_ring->count; i++) {
-               struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
+       u16 count_mask = rx_ring->count - 1;
+       u16 ntc = rx_ring->next_to_clean;
+       u16 ntu = rx_ring->next_to_use;
 
-               if (!xdp)
-                       continue;
+       for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+               struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
 
-               *xdp = NULL;
+               xsk_buff_free(xdp);
        }
 }
 
index fd54d3e..446894d 100644 (file)
@@ -7648,6 +7648,20 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
        struct vf_mac_filter *entry = NULL;
        int ret = 0;
 
+       if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
+           !vf_data->trusted) {
+               dev_warn(&pdev->dev,
+                        "VF %d requested MAC filter but is administratively denied\n",
+                         vf);
+               return -EINVAL;
+       }
+       if (!is_valid_ether_addr(addr)) {
+               dev_warn(&pdev->dev,
+                        "VF %d attempted to set invalid MAC filter\n",
+                         vf);
+               return -EINVAL;
+       }
+
        switch (info) {
        case E1000_VF_MAC_FILTER_CLR:
                /* remove all unicast MAC filters related to the current VF */
@@ -7661,20 +7675,6 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
                }
                break;
        case E1000_VF_MAC_FILTER_ADD:
-               if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
-                   !vf_data->trusted) {
-                       dev_warn(&pdev->dev,
-                                "VF %d requested MAC filter but is administratively denied\n",
-                                vf);
-                       return -EINVAL;
-               }
-               if (!is_valid_ether_addr(addr)) {
-                       dev_warn(&pdev->dev,
-                                "VF %d attempted to set invalid MAC filter\n",
-                                vf);
-                       return -EINVAL;
-               }
-
                /* try to find empty slot in the list */
                list_for_each(pos, &adapter->vf_macs.l) {
                        entry = list_entry(pos, struct vf_mac_filter, l);
@@ -9254,7 +9254,7 @@ static int __maybe_unused igb_suspend(struct device *dev)
        return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }
 
-static int __maybe_unused igb_resume(struct device *dev)
+static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9297,17 +9297,24 @@ static int __maybe_unused igb_resume(struct device *dev)
 
        wr32(E1000_WUS, ~0);
 
-       rtnl_lock();
+       if (!rpm)
+               rtnl_lock();
        if (!err && netif_running(netdev))
                err = __igb_open(netdev, true);
 
        if (!err)
                netif_device_attach(netdev);
-       rtnl_unlock();
+       if (!rpm)
+               rtnl_unlock();
 
        return err;
 }
 
+static int __maybe_unused igb_resume(struct device *dev)
+{
+       return __igb_resume(dev, false);
+}
+
 static int __maybe_unused igb_runtime_idle(struct device *dev)
 {
        struct net_device *netdev = dev_get_drvdata(dev);
@@ -9326,7 +9333,7 @@ static int __maybe_unused igb_runtime_suspend(struct device *dev)
 
 static int __maybe_unused igb_runtime_resume(struct device *dev)
 {
-       return igb_resume(dev);
+       return __igb_resume(dev, true);
 }
 
 static void igb_shutdown(struct pci_dev *pdev)
@@ -9442,7 +9449,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
  *  @pdev: Pointer to PCI device
  *
  *  Restart the card from scratch, as if from a cold-boot. Implementation
- *  resembles the first-half of the igb_resume routine.
+ *  resembles the first-half of the __igb_resume routine.
  **/
 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
 {
@@ -9482,7 +9489,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
  *
  *  This callback is called when the error recovery driver tells us that
  *  its OK to resume normal operation. Implementation resembles the
- *  second-half of the igb_resume routine.
+ *  second-half of the __igb_resume routine.
  */
 static void igb_io_resume(struct pci_dev *pdev)
 {
index 74ccd62..4d988da 100644 (file)
@@ -2859,6 +2859,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 err_hw_init:
+       netif_napi_del(&adapter->rx_ring->napi);
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
 err_sw_init:
index b2ef9fd..b6807e1 100644 (file)
@@ -636,7 +636,7 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
                ltrv = rd32(IGC_LTRMAXV);
                if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
                        ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
-                              (scale_min << IGC_LTRMAXV_SCALE_SHIFT);
+                              (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
                        wr32(IGC_LTRMAXV, ltrv);
                }
        }
index 8e44828..d28a80a 100644 (file)
@@ -5467,6 +5467,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+       if (icr & IGC_ICR_TS)
+               igc_tsync_interrupt(adapter);
+
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
@@ -5510,6 +5513,9 @@ static irqreturn_t igc_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+       if (icr & IGC_ICR_TS)
+               igc_tsync_interrupt(adapter);
+
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
index 30568e3..4f9245a 100644 (file)
@@ -768,7 +768,20 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
  */
 static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
 {
-       return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false;
+       if (!IS_ENABLED(CONFIG_X86_TSC))
+               return false;
+
+       /* FIXME: it was noticed that enabling support for PCIe PTM in
+        * some i225-V models could cause lockups when bringing the
+        * interface up/down. There should be no downsides to
+        * disabling crosstimestamping support for i225-V, as it
+        * doesn't have any PTP support. That way we gain some time
+        * while root causing the issue.
+        */
+       if (adapter->pdev->device == IGC_DEV_ID_I225_V)
+               return false;
+
+       return pcie_ptm_enabled(adapter->pdev);
 }
 
 static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
index 0f9f022..7068ecb 100644 (file)
@@ -3247,8 +3247,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                /* If Flow Director is enabled, set interrupt affinity */
                if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                        /* assign the mask for this irq */
-                       irq_set_affinity_hint(entry->vector,
-                                             &q_vector->affinity_mask);
+                       irq_update_affinity_hint(entry->vector,
+                                                &q_vector->affinity_mask);
                }
        }
 
@@ -3264,8 +3264,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
        while (vector) {
                vector--;
-               irq_set_affinity_hint(adapter->msix_entries[vector].vector,
-                                     NULL);
+               irq_update_affinity_hint(adapter->msix_entries[vector].vector,
+                                        NULL);
                free_irq(adapter->msix_entries[vector].vector,
                         adapter->q_vector[vector]);
        }
@@ -3398,7 +3398,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
                        continue;
 
                /* clear the affinity_mask in the IRQ descriptor */
-               irq_set_affinity_hint(entry->vector, NULL);
+               irq_update_affinity_hint(entry->vector, NULL);
 
                free_irq(entry->vector, q_vector);
        }
@@ -5531,6 +5531,10 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
        if (!speed && hw->mac.ops.get_link_capabilities) {
                ret = hw->mac.ops.get_link_capabilities(hw, &speed,
                                                        &autoneg);
+               /* remove NBASE-T speeds from default autonegotiation
+                * to accommodate broken network switches in the field
+                * which cannot cope with advertised NBASE-T speeds
+                */
                speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
                           IXGBE_LINK_SPEED_2_5GB_FULL);
        }
index 9724ffb..e4b50c7 100644 (file)
@@ -3405,6 +3405,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
        /* flush pending Tx transactions */
        ixgbe_clear_tx_pending(hw);
 
+       /* set MDIO speed before talking to the PHY in case it's the 1st time */
+       ixgbe_set_mdio_speed(hw);
+
        /* PHY ops must be identified and initialized prior to reset */
        status = hw->phy.ops.init(hw);
        if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
index 0da09ea..80bfaf2 100644 (file)
@@ -71,6 +71,8 @@ struct xrx200_priv {
        struct xrx200_chan chan_tx;
        struct xrx200_chan chan_rx;
 
+       u16 rx_buf_size;
+
        struct net_device *net_dev;
        struct device *dev;
 
@@ -97,6 +99,16 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
        xrx200_pmac_w32(priv, val, offset);
 }
 
+static int xrx200_max_frame_len(int mtu)
+{
+       return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+       return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
 /* drop all the packets from the DMA ring */
 static void xrx200_flush_dma(struct xrx200_chan *ch)
 {
@@ -109,8 +121,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
                        break;
 
                desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
-                           (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
-                            ETH_FCS_LEN);
+                           ch->priv->rx_buf_size;
                ch->dma.desc++;
                ch->dma.desc %= LTQ_DESC_NUM;
        }
@@ -158,21 +169,21 @@ static int xrx200_close(struct net_device *net_dev)
 
 static int xrx200_alloc_skb(struct xrx200_chan *ch)
 {
-       int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
        struct sk_buff *skb = ch->skb[ch->dma.desc];
+       struct xrx200_priv *priv = ch->priv;
        dma_addr_t mapping;
        int ret = 0;
 
-       ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
-                                                         len);
+       ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
+                                                         priv->rx_buf_size);
        if (!ch->skb[ch->dma.desc]) {
                ret = -ENOMEM;
                goto skip;
        }
 
-       mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
-                                len, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+       mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+                                priv->rx_buf_size, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(priv->dev, mapping))) {
                dev_kfree_skb_any(ch->skb[ch->dma.desc]);
                ch->skb[ch->dma.desc] = skb;
                ret = -ENOMEM;
@@ -184,7 +195,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
        wmb();
 skip:
        ch->dma.desc_base[ch->dma.desc].ctl =
-               LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+               LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;
 
        return ret;
 }
@@ -213,7 +224,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
        skb->protocol = eth_type_trans(skb, net_dev);
        netif_receive_skb(skb);
        net_dev->stats.rx_packets++;
-       net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
+       net_dev->stats.rx_bytes += len;
 
        return 0;
 }
@@ -356,6 +367,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
        int ret = 0;
 
        net_dev->mtu = new_mtu;
+       priv->rx_buf_size = xrx200_buffer_size(new_mtu);
 
        if (new_mtu <= old_mtu)
                return ret;
@@ -375,6 +387,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
                ret = xrx200_alloc_skb(ch_rx);
                if (ret) {
                        net_dev->mtu = old_mtu;
+                       priv->rx_buf_size = xrx200_buffer_size(old_mtu);
                        break;
                }
                dev_kfree_skb_any(skb);
@@ -505,7 +518,8 @@ static int xrx200_probe(struct platform_device *pdev)
        net_dev->netdev_ops = &xrx200_netdev_ops;
        SET_NETDEV_DEV(net_dev, dev);
        net_dev->min_mtu = ETH_ZLEN;
-       net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+       net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+       priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
 
        /* load the memory ranges */
        priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
index 6480696..6da8a59 100644 (file)
@@ -2960,11 +2960,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
 
        if (priv->percpu_pools) {
-               err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0);
+               err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
                if (err < 0)
                        goto err_free_dma;
 
-               err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0);
+               err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
                if (err < 0)
                        goto err_unregister_rxq_short;
 
index 0ef68fd..61c2090 100644 (file)
@@ -5,6 +5,8 @@
  *
  */
 
+#include <linux/module.h>
+
 #include "otx2_common.h"
 #include "otx2_ptp.h"
 
index 4369a3f..c687dc9 100644 (file)
@@ -54,12 +54,14 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
 struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
                                                 u32 dev_id, u32 hw_id)
 {
-       struct prestera_port *port = NULL;
+       struct prestera_port *port = NULL, *tmp;
 
        read_lock(&sw->port_list_lock);
-       list_for_each_entry(port, &sw->port_list, list) {
-               if (port->dev_id == dev_id && port->hw_id == hw_id)
+       list_for_each_entry(tmp, &sw->port_list, list) {
+               if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+                       port = tmp;
                        break;
+               }
        }
        read_unlock(&sw->port_list_lock);
 
@@ -68,12 +70,14 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
 
 struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
 {
-       struct prestera_port *port = NULL;
+       struct prestera_port *port = NULL, *tmp;
 
        read_lock(&sw->port_list_lock);
-       list_for_each_entry(port, &sw->port_list, list) {
-               if (port->id == id)
+       list_for_each_entry(tmp, &sw->port_list, list) {
+               if (tmp->id == id) {
+                       port = tmp;
                        break;
+               }
        }
        read_unlock(&sw->port_list_lock);
 
@@ -764,23 +768,27 @@ static int prestera_netdev_port_event(struct net_device *lower,
                                      struct net_device *dev,
                                      unsigned long event, void *ptr)
 {
-       struct netdev_notifier_changeupper_info *info = ptr;
+       struct netdev_notifier_info *info = ptr;
+       struct netdev_notifier_changeupper_info *cu_info;
        struct prestera_port *port = netdev_priv(dev);
        struct netlink_ext_ack *extack;
        struct net_device *upper;
 
-       extack = netdev_notifier_info_to_extack(&info->info);
-       upper = info->upper_dev;
+       extack = netdev_notifier_info_to_extack(info);
+       cu_info = container_of(info,
+                              struct netdev_notifier_changeupper_info,
+                              info);
 
        switch (event) {
        case NETDEV_PRECHANGEUPPER:
+               upper = cu_info->upper_dev;
                if (!netif_is_bridge_master(upper) &&
                    !netif_is_lag_master(upper)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }
 
-               if (!info->linking)
+               if (!cu_info->linking)
                        break;
 
                if (netdev_has_any_upper_dev(upper)) {
@@ -789,7 +797,7 @@ static int prestera_netdev_port_event(struct net_device *lower,
                }
 
                if (netif_is_lag_master(upper) &&
-                   !prestera_lag_master_check(upper, info->upper_info, extack))
+                   !prestera_lag_master_check(upper, cu_info->upper_info, extack))
                        return -EOPNOTSUPP;
                if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG_MOD(extack,
@@ -805,14 +813,15 @@ static int prestera_netdev_port_event(struct net_device *lower,
                break;
 
        case NETDEV_CHANGEUPPER:
+               upper = cu_info->upper_dev;
                if (netif_is_bridge_master(upper)) {
-                       if (info->linking)
+                       if (cu_info->linking)
                                return prestera_bridge_port_join(upper, port,
                                                                 extack);
                        else
                                prestera_bridge_port_leave(upper, port);
                } else if (netif_is_lag_master(upper)) {
-                       if (info->linking)
+                       if (cu_info->linking)
                                return prestera_lag_port_add(port, upper);
                        else
                                prestera_lag_port_del(port);
index 9e48509..414e390 100644 (file)
@@ -244,9 +244,9 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
            cpumask_empty(eq->affinity_mask))
                return;
 
-       hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+       hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
        if (hint_err)
-               mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+               mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
 }
 #endif
 
@@ -1123,9 +1123,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                if (eq_table->eq[i].have_irq) {
                        free_cpumask_var(eq_table->eq[i].affinity_mask);
-#if defined(CONFIG_SMP)
-                       irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
-#endif
+                       irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
                        eq_table->eq[i].have_irq = 0;
                }
index f0ac6b0..b47a0d3 100644 (file)
@@ -783,6 +783,8 @@ struct mlx5e_channel {
        DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
        int                        ix;
        int                        cpu;
+       /* Sync between icosq recovery and XSK enable/disable. */
+       struct mutex               icosq_recovery_lock;
 };
 
 struct mlx5e_ptp;
@@ -1014,9 +1016,6 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
 void mlx5e_destroy_rq(struct mlx5e_rq *rq);
 
 struct mlx5e_sq_param;
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
-                    struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
-void mlx5e_close_icosq(struct mlx5e_icosq *sq);
 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
                     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
                     struct mlx5e_xdpsq *sq, bool is_redirect);
index d5b7110..0107e4e 100644 (file)
@@ -30,6 +30,8 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
 void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
 void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
 void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c);
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c);
 
 #define MLX5E_REPORTER_PER_Q_MAX_LEN 256
 
index d6c7c81..7c9dd3a 100644 (file)
@@ -66,7 +66,7 @@ mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
 
 static inline void
 mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
-                    struct sk_buff *skb) {}
+                    struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); }
 
 #endif /* CONFIG_MLX5_CLS_ACT */
 
index 74086eb..2684e9d 100644 (file)
@@ -62,6 +62,7 @@ static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
 
 static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 {
+       struct mlx5e_rq *xskrq = NULL;
        struct mlx5_core_dev *mdev;
        struct mlx5e_icosq *icosq;
        struct net_device *dev;
@@ -70,7 +71,13 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
        int err;
 
        icosq = ctx;
+
+       mutex_lock(&icosq->channel->icosq_recovery_lock);
+
+       /* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. */
        rq = &icosq->channel->rq;
+       if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state))
+               xskrq = &icosq->channel->xskrq;
        mdev = icosq->channel->mdev;
        dev = icosq->channel->netdev;
        err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
@@ -84,6 +91,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
                goto out;
 
        mlx5e_deactivate_rq(rq);
+       if (xskrq)
+               mlx5e_deactivate_rq(xskrq);
+
        err = mlx5e_wait_for_icosq_flush(icosq);
        if (err)
                goto out;
@@ -97,15 +107,28 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
                goto out;
 
        mlx5e_reset_icosq_cc_pc(icosq);
+
        mlx5e_free_rx_in_progress_descs(rq);
+       if (xskrq)
+               mlx5e_free_rx_in_progress_descs(xskrq);
+
        clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
        mlx5e_activate_icosq(icosq);
-       mlx5e_activate_rq(rq);
 
+       mlx5e_activate_rq(rq);
        rq->stats->recover++;
+
+       if (xskrq) {
+               mlx5e_activate_rq(xskrq);
+               xskrq->stats->recover++;
+       }
+
+       mutex_unlock(&icosq->channel->icosq_recovery_lock);
+
        return 0;
 out:
        clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+       mutex_unlock(&icosq->channel->icosq_recovery_lock);
        return err;
 }
 
@@ -706,6 +729,16 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
        mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
 }
 
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c)
+{
+       mutex_lock(&c->icosq_recovery_lock);
+}
+
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
+{
+       mutex_unlock(&c->icosq_recovery_lock);
+}
+
 static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
        .name = "rx",
        .recover = mlx5e_rx_reporter_recover,
index 4f4bc87..614cd94 100644 (file)
@@ -466,6 +466,14 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
        return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
 }
 
+static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+                                         void *ctx)
+{
+       struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
+
+       return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
+}
+
 static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
                                          struct devlink_fmsg *fmsg)
 {
@@ -561,7 +569,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
        to_ctx.sq = sq;
        err_ctx.ctx = &to_ctx;
        err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
-       err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+       err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
        snprintf(err_str, sizeof(err_str),
                 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
                 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
index 538bc24..8526a5f 100644 (file)
@@ -4,6 +4,7 @@
 #include "setup.h"
 #include "en/params.h"
 #include "en/txrx.h"
+#include "en/health.h"
 
 /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
  * change unexpectedly, and mlx5e has a minimum valid stride size for striding
@@ -170,7 +171,13 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
 
 void mlx5e_activate_xsk(struct mlx5e_channel *c)
 {
+       /* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
+        * activating XSKRQ in the middle of recovery.
+        */
+       mlx5e_reporter_icosq_suspend_recovery(c);
        set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+       mlx5e_reporter_icosq_resume_recovery(c);
+
        /* TX queue is created active. */
 
        spin_lock_bh(&c->async_icosq_lock);
@@ -180,6 +187,13 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
 
 void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
 {
-       mlx5e_deactivate_rq(&c->xskrq);
+       /* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
+        * middle of recovery. Suspend the recovery to avoid it.
+        */
+       mlx5e_reporter_icosq_suspend_recovery(c);
+       clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+       mlx5e_reporter_icosq_resume_recovery(c);
+       synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+
        /* TX queue is disabled on close. */
 }
index 6557159..4137984 100644 (file)
@@ -1087,8 +1087,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
        cancel_work_sync(&rq->dim.work);
-       if (rq->icosq)
-               cancel_work_sync(&rq->icosq->recover_work);
        cancel_work_sync(&rq->recover_work);
        mlx5e_destroy_rq(rq);
        mlx5e_free_rx_descs(rq);
@@ -1216,9 +1214,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
        mlx5e_reporter_icosq_cqe_err(sq);
 }
 
+static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+       struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+                                             recover_work);
+
+       /* Not implemented yet. */
+
+       netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
+}
+
 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
                             struct mlx5e_sq_param *param,
-                            struct mlx5e_icosq *sq)
+                            struct mlx5e_icosq *sq,
+                            work_func_t recover_work_func)
 {
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
@@ -1239,7 +1248,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
        if (err)
                goto err_sq_wq_destroy;
 
-       INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
+       INIT_WORK(&sq->recover_work, recover_work_func);
 
        return 0;
 
@@ -1575,13 +1584,14 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
        mlx5e_reporter_tx_err_cqe(sq);
 }
 
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
-                    struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
+static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+                           struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
+                           work_func_t recover_work_func)
 {
        struct mlx5e_create_sq_param csp = {};
        int err;
 
-       err = mlx5e_alloc_icosq(c, param, sq);
+       err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
        if (err)
                return err;
 
@@ -1620,7 +1630,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
        synchronize_net(); /* Sync with NAPI. */
 }
 
-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
 {
        struct mlx5e_channel *c = sq->channel;
 
@@ -2084,11 +2094,15 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 
        spin_lock_init(&c->async_icosq_lock);
 
-       err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
+       err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
+                              mlx5e_async_icosq_err_cqe_work);
        if (err)
                goto err_close_xdpsq_cq;
 
-       err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+       mutex_init(&c->icosq_recovery_lock);
+
+       err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
+                              mlx5e_icosq_err_cqe_work);
        if (err)
                goto err_close_async_icosq;
 
@@ -2156,9 +2170,12 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
        mlx5e_close_xdpsq(&c->xdpsq);
        if (c->xdp)
                mlx5e_close_xdpsq(&c->rq_xdpsq);
+       /* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
+       cancel_work_sync(&c->icosq.recover_work);
        mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        mlx5e_close_icosq(&c->icosq);
+       mutex_destroy(&c->icosq_recovery_lock);
        mlx5e_close_icosq(&c->async_icosq);
        if (c->xdp)
                mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -3724,12 +3741,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
 
 static int mlx5e_handle_feature(struct net_device *netdev,
                                netdev_features_t *features,
-                               netdev_features_t wanted_features,
                                netdev_features_t feature,
                                mlx5e_feature_handler feature_handler)
 {
-       netdev_features_t changes = wanted_features ^ netdev->features;
-       bool enable = !!(wanted_features & feature);
+       netdev_features_t changes = *features ^ netdev->features;
+       bool enable = !!(*features & feature);
        int err;
 
        if (!(changes & feature))
@@ -3737,22 +3753,22 @@ static int mlx5e_handle_feature(struct net_device *netdev,
 
        err = feature_handler(netdev, enable);
        if (err) {
+               MLX5E_SET_FEATURE(features, feature, !enable);
                netdev_err(netdev, "%s feature %pNF failed, err %d\n",
                           enable ? "Enable" : "Disable", &feature, err);
                return err;
        }
 
-       MLX5E_SET_FEATURE(features, feature, enable);
        return 0;
 }
 
 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
 {
-       netdev_features_t oper_features = netdev->features;
+       netdev_features_t oper_features = features;
        int err = 0;
 
 #define MLX5E_HANDLE_FEATURE(feature, handler) \
-       mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
+       mlx5e_handle_feature(netdev, &oper_features, feature, handler)
 
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
index 3d45f4a..5e454a1 100644 (file)
@@ -1196,21 +1196,16 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
        if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
                goto offload_rule_0;
 
-       if (flow_flag_test(flow, CT)) {
-               mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
-               return;
-       }
-
-       if (flow_flag_test(flow, SAMPLE)) {
-               mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
-               return;
-       }
-
        if (attr->esw_attr->split_count)
                mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
 
+       if (flow_flag_test(flow, CT))
+               mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+       else if (flow_flag_test(flow, SAMPLE))
+               mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
+       else
 offload_rule_0:
-       mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+               mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 }
 
 struct mlx5_flow_handle *
@@ -1445,7 +1440,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                                                        MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
                                                        metadata);
                        if (err)
-                               return err;
+                               goto err_out;
+
+                       attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
                }
        }
 
@@ -1461,13 +1458,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                if (attr->chain) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Internal port rule is only supported on chain 0");
-                       return -EOPNOTSUPP;
+                       err = -EOPNOTSUPP;
+                       goto err_out;
                }
 
                if (attr->dest_chain) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Internal port rule offload doesn't support goto action");
-                       return -EOPNOTSUPP;
+                       err = -EOPNOTSUPP;
+                       goto err_out;
                }
 
                int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
@@ -1475,8 +1474,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                                                 flow_flag_test(flow, EGRESS) ?
                                                 MLX5E_TC_INT_PORT_EGRESS :
                                                 MLX5E_TC_INT_PORT_INGRESS);
-               if (IS_ERR(int_port))
-                       return PTR_ERR(int_port);
+               if (IS_ERR(int_port)) {
+                       err = PTR_ERR(int_port);
+                       goto err_out;
+               }
 
                esw_attr->int_port = int_port;
        }
index 97e5845..d5e4763 100644 (file)
@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
 
 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
 {
+       if (!mlx5_chains_prios_supported(chains))
+               return 1;
+
        if (mlx5_chains_ignore_flow_level_supported(chains))
                return UINT_MAX;
 
index 7df9c7f..6508349 100644 (file)
@@ -1809,12 +1809,13 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
 
 int mlx5_recover_device(struct mlx5_core_dev *dev)
 {
-       int ret = -EIO;
+       if (!mlx5_core_is_sf(dev)) {
+               mlx5_pci_disable_device(dev);
+               if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
+                       return -EIO;
+       }
 
-       mlx5_pci_disable_device(dev);
-       if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
-               ret = mlx5_load_one(dev);
-       return ret;
+       return mlx5_load_one(dev);
 }
 
 static struct pci_driver mlx5_core_driver = {
index 830444f..fd7a671 100644 (file)
@@ -143,11 +143,11 @@ static void irq_release(struct mlx5_irq *irq)
        struct mlx5_irq_pool *pool = irq->pool;
 
        xa_erase(&pool->irqs, irq->index);
-       /* free_irq requires that affinity and rmap will be cleared
+       /* free_irq requires that affinity_hint and rmap will be cleared
         * before calling it. This is why there is asymmetry with set_rmap
         * which should be called after alloc_irq but before request_irq.
         */
-       irq_set_affinity_hint(irq->irqn, NULL);
+       irq_update_affinity_hint(irq->irqn, NULL);
        free_cpumask_var(irq->mask);
        free_irq(irq->irqn, &irq->nh);
        kfree(irq);
@@ -316,7 +316,7 @@ static struct mlx5_irq *irq_pool_create_irq(struct mlx5_irq_pool *pool,
        if (IS_ERR(irq))
                return irq;
        cpumask_copy(irq->mask, affinity);
-       irq_set_affinity_hint(irq->irqn, irq->mask);
+       irq_set_affinity_and_hint(irq->irqn, irq->mask);
        return irq;
 }
 
@@ -356,8 +356,8 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
        new_irq = irq_pool_create_irq(pool, affinity);
        if (IS_ERR(new_irq)) {
                if (!least_loaded_irq) {
-                       mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
-                                     cpumask_first(affinity));
+                       mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
+                                     PTR_ERR(new_irq));
                        mutex_unlock(&pool->lock);
                        return new_irq;
                }
@@ -398,8 +398,8 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
        cpumask_copy(irq->mask, affinity);
        if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
            cpumask_empty(irq->mask))
-               cpumask_set_cpu(0, irq->mask);
-       irq_set_affinity_hint(irq->irqn, irq->mask);
+               cpumask_set_cpu(cpumask_first(cpu_online_mask), irq->mask);
+       irq_set_affinity_and_hint(irq->irqn, irq->mask);
 unlock:
        mutex_unlock(&pool->lock);
        return irq;
index 8cbd36c..c54cc45 100644 (file)
@@ -2,6 +2,7 @@
 /* Copyright (c) 2019 Mellanox Technologies. */
 
 #include <linux/mlx5/eswitch.h>
+#include <linux/err.h>
 #include "dr_types.h"
 
 #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
@@ -72,9 +73,9 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
        }
 
        dmn->uar = mlx5_get_uars_page(dmn->mdev);
-       if (!dmn->uar) {
+       if (IS_ERR(dmn->uar)) {
                mlx5dr_err(dmn, "Couldn't allocate UAR\n");
-               ret = -ENOMEM;
+               ret = PTR_ERR(dmn->uar);
                goto clean_pd;
        }
 
@@ -163,9 +164,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
 
 static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
 {
-       return dr_domain_query_vport(dmn,
-                                    dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
-                                    false,
+       return dr_domain_query_vport(dmn, 0, false,
                                     &dmn->info.caps.vports.esw_manager_caps);
 }
 
index 217e3b3..c34833f 100644 (file)
@@ -8494,7 +8494,8 @@ mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
        u8 mac_profile;
        int err;
 
-       if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+       if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
+           !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
                return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
 
        err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
index 2e25798..7f49042 100644 (file)
@@ -321,6 +321,8 @@ static int ks8851_probe_par(struct platform_device *pdev)
                return ret;
 
        netdev->irq = platform_get_irq(pdev, 0);
+       if (netdev->irq < 0)
+               return netdev->irq;
 
        return ks8851_probe_common(netdev, dev, msg_enable);
 }
index 34b971f..078d6a5 100644 (file)
@@ -480,16 +480,16 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
        if (err)
                goto out;
 
-       err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
-                                    &hwc_wq->msg_buf);
-       if (err)
-               goto out;
-
        hwc_wq->hwc = hwc;
        hwc_wq->gdma_wq = queue;
        hwc_wq->queue_depth = q_depth;
        hwc_wq->hwc_cq = hwc_cq;
 
+       err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
+                                    &hwc_wq->msg_buf);
+       if (err)
+               goto out;
+
        *hwc_wq_ptr = hwc_wq;
        return 0;
 out:
index d7ac030..34c0d2d 100644 (file)
@@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
                return -ENOMEM;
 
        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
-       if (!cache)
+       if (!cache) {
+               nfp_cpp_area_free(area);
                return -ENOMEM;
+       }
 
        cache->id = 0;
        cache->addr = 0;
index 63f8a81..2ff7be1 100644 (file)
@@ -3135,7 +3135,7 @@ int ionic_lif_init(struct ionic_lif *lif)
                return -EINVAL;
        }
 
-       lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
+       lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
        if (!lif->dbid_inuse) {
                dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
                return -ENOMEM;
index 065e900..999abcf 100644 (file)
@@ -1643,6 +1643,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        data_split = true;
                }
        } else {
+               if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
+                       DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
+                       qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+                       qede_update_tx_producer(txq);
+                       return NETDEV_TX_OK;
+               }
+
                val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
                         ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
        }
index 1e6d72a..71523d7 100644 (file)
@@ -3480,20 +3480,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
 
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-       err = ql_wait_for_drvr_lock(qdev);
-       if (err) {
-               err = ql_adapter_initialize(qdev);
-               if (err) {
-                       netdev_err(ndev, "Unable to initialize adapter\n");
-                       goto err_init;
-               }
-               netdev_err(ndev, "Releasing driver lock\n");
-               ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
-       } else {
+       if (!ql_wait_for_drvr_lock(qdev)) {
                netdev_err(ndev, "Could not acquire driver lock\n");
+               err = -ENODEV;
                goto err_lock;
        }
 
+       err = ql_adapter_initialize(qdev);
+       if (err) {
+               netdev_err(ndev, "Unable to initialize adapter\n");
+               goto err_init;
+       }
+       ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
        set_bit(QL_ADAPTER_UP, &qdev->flags);
index 7160b42..d0111cb 100644 (file)
@@ -201,7 +201,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
                                   struct qlcnic_info *, u16);
 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
                              struct qlcnic_vf_info *, u16);
index dd03be3..42a44c9 100644 (file)
@@ -432,7 +432,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
                                            struct qlcnic_cmd_args *cmd)
 {
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
-       int i, num_vlans;
+       int i, num_vlans, ret;
        u16 *vlans;
 
        if (sriov->allowed_vlans)
@@ -443,7 +443,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
        dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
                 sriov->num_allowed_vlans);
 
-       qlcnic_sriov_alloc_vlans(adapter);
+       ret = qlcnic_sriov_alloc_vlans(adapter);
+       if (ret)
+               return ret;
 
        if (!sriov->any_vlan)
                return 0;
@@ -2154,7 +2156,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
        return err;
 }
 
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
        struct qlcnic_vf_info *vf;
@@ -2164,7 +2166,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
                vf = &sriov->vf_info[i];
                vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
                                          sizeof(*vf->sriov_vlans), GFP_KERNEL);
+               if (!vf->sriov_vlans)
+                       return -ENOMEM;
        }
+
+       return 0;
 }
 
 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
index 447720b..e90fa97 100644 (file)
@@ -597,7 +597,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
        if (err)
                goto del_flr_queue;
 
-       qlcnic_sriov_alloc_vlans(adapter);
+       err = qlcnic_sriov_alloc_vlans(adapter);
+       if (err)
+               goto del_flr_queue;
 
        return err;
 
index 6aa8122..e77a5cb 100644 (file)
@@ -609,6 +609,9 @@ static size_t ef100_update_stats(struct efx_nic *efx,
        ef100_common_stat_mask(mask);
        ef100_ethtool_stat_mask(mask);
 
+       if (!mc_stats)
+               return 0;
+
        efx_nic_copy_stats(efx, mc_stats);
        efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask,
                             stats, mc_stats, false);
index 966f13e..0c6cc21 100644 (file)
@@ -110,6 +110,8 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
        struct ef4_rx_page_state *state;
        unsigned index;
 
+       if (unlikely(!rx_queue->page_ring))
+               return NULL;
        index = rx_queue->page_remove & rx_queue->page_ptr_mask;
        page = rx_queue->page_ring[index];
        if (page == NULL)
@@ -293,6 +295,9 @@ static void ef4_recycle_rx_pages(struct ef4_channel *channel,
 {
        struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
 
+       if (unlikely(!rx_queue->page_ring))
+               return;
+
        do {
                ef4_recycle_rx_page(channel, rx_buf);
                rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
@@ -728,7 +733,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
-       rx_queue->page_ptr_mask = page_ring_size - 1;
+       if (!rx_queue->page_ring)
+               rx_queue->page_ptr_mask = 0;
+       else
+               rx_queue->page_ptr_mask = page_ring_size - 1;
 }
 
 void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
index 68fc7d3..633ca77 100644 (file)
@@ -45,6 +45,8 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
        unsigned int index;
        struct page *page;
 
+       if (unlikely(!rx_queue->page_ring))
+               return NULL;
        index = rx_queue->page_remove & rx_queue->page_ptr_mask;
        page = rx_queue->page_ring[index];
        if (page == NULL)
@@ -114,6 +116,9 @@ void efx_recycle_rx_pages(struct efx_channel *channel,
 {
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 
+       if (unlikely(!rx_queue->page_ring))
+               return;
+
        do {
                efx_recycle_rx_page(channel, rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
@@ -150,7 +155,10 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
-       rx_queue->page_ptr_mask = page_ring_size - 1;
+       if (!rx_queue->page_ring)
+               rx_queue->page_ptr_mask = 0;
+       else
+               rx_queue->page_ptr_mask = page_ring_size - 1;
 }
 
 static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
index 89381f7..dd6f69c 100644 (file)
@@ -2072,6 +2072,11 @@ static int smc911x_drv_probe(struct platform_device *pdev)
 
        ndev->dma = (unsigned char)-1;
        ndev->irq = platform_get_irq(pdev, 0);
+       if (ndev->irq < 0) {
+               ret = ndev->irq;
+               goto release_both;
+       }
+
        lp = netdev_priv(ndev);
        lp->netdev = ndev;
 #ifdef SMC_DYNAMIC_BUS_CONFIG
index 6924a6a..c469abc 100644 (file)
@@ -33,6 +33,7 @@ struct rk_gmac_ops {
        void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
        void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
        void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+       bool regs_valid;
        u32 regs[];
 };
 
@@ -1092,6 +1093,7 @@ static const struct rk_gmac_ops rk3568_ops = {
        .set_to_rmii = rk3568_set_to_rmii,
        .set_rgmii_speed = rk3568_set_gmac_speed,
        .set_rmii_speed = rk3568_set_gmac_speed,
+       .regs_valid = true,
        .regs = {
                0xfe2a0000, /* gmac0 */
                0xfe010000, /* gmac1 */
@@ -1383,7 +1385,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
         * to be distinguished.
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res) {
+       if (res && ops->regs_valid) {
                int i = 0;
 
                while (ops->regs[i]) {
index 66fc8be..e2e0f97 100644 (file)
@@ -26,7 +26,7 @@
 #define ETHER_CLK_SEL_FREQ_SEL_125M    (BIT(9) | BIT(8))
 #define ETHER_CLK_SEL_FREQ_SEL_50M     BIT(9)
 #define ETHER_CLK_SEL_FREQ_SEL_25M     BIT(8)
-#define ETHER_CLK_SEL_FREQ_SEL_2P5M    BIT(0)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M    0
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
index 5f12973..873b9e3 100644 (file)
@@ -172,6 +172,19 @@ struct stmmac_flow_entry {
        int is_l4;
 };
 
+/* Rx Frame Steering */
+enum stmmac_rfs_type {
+       STMMAC_RFS_T_VLAN,
+       STMMAC_RFS_T_MAX,
+};
+
+struct stmmac_rfs_entry {
+       unsigned long cookie;
+       int in_use;
+       int type;
+       int tc;
+};
+
 struct stmmac_priv {
        /* Frequently used values are kept adjacent for cache effect */
        u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -289,6 +302,10 @@ struct stmmac_priv {
        struct stmmac_tc_entry *tc_entries;
        unsigned int flow_entries_max;
        struct stmmac_flow_entry *flow_entries;
+       unsigned int rfs_entries_max[STMMAC_RFS_T_MAX];
+       unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX];
+       unsigned int rfs_entries_total;
+       struct stmmac_rfs_entry *rfs_entries;
 
        /* Pulse Per Second output */
        struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
index da8306f..8ded4be 100644 (file)
@@ -1461,16 +1461,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 {
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+       if (priv->dma_cap.addr64 <= 32)
+               gfp |= GFP_DMA32;
 
        if (!buf->page) {
-               buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+               buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
                if (!buf->page)
                        return -ENOMEM;
                buf->page_offset = stmmac_rx_offset(priv);
        }
 
        if (priv->sph && !buf->sec_page) {
-               buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+               buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
                if (!buf->sec_page)
                        return -ENOMEM;
 
@@ -4482,6 +4486,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int dirty = stmmac_rx_dirty(priv, queue);
        unsigned int entry = rx_q->dirty_rx;
+       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+       if (priv->dma_cap.addr64 <= 32)
+               gfp |= GFP_DMA32;
 
        while (dirty-- > 0) {
                struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
@@ -4494,13 +4502,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
                        p = rx_q->dma_rx + entry;
 
                if (!buf->page) {
-                       buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+                       buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
                        if (!buf->page)
                                break;
                }
 
                if (priv->sph && !buf->sec_page) {
-                       buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+                       buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
                        if (!buf->sec_page)
                                break;
 
index 580cc03..be9b58b 100644 (file)
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
                time.tv_nsec = priv->plat->est->btr_reserve[0];
                time.tv_sec = priv->plat->est->btr_reserve[1];
                basetime = timespec64_to_ktime(time);
-               cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+               cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
                             priv->plat->est->ctr[0];
                time = stmmac_calc_tas_basetime(basetime,
                                                current_time_ns,
index 1c4ea0b..d0a2b28 100644 (file)
@@ -232,11 +232,33 @@ static int tc_setup_cls_u32(struct stmmac_priv *priv,
        }
 }
 
+static int tc_rfs_init(struct stmmac_priv *priv)
+{
+       int i;
+
+       priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
+
+       for (i = 0; i < STMMAC_RFS_T_MAX; i++)
+               priv->rfs_entries_total += priv->rfs_entries_max[i];
+
+       priv->rfs_entries = devm_kcalloc(priv->device,
+                                        priv->rfs_entries_total,
+                                        sizeof(*priv->rfs_entries),
+                                        GFP_KERNEL);
+       if (!priv->rfs_entries)
+               return -ENOMEM;
+
+       dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
+                priv->rfs_entries_total);
+
+       return 0;
+}
+
 static int tc_init(struct stmmac_priv *priv)
 {
        struct dma_features *dma_cap = &priv->dma_cap;
        unsigned int count;
-       int i;
+       int ret, i;
 
        if (dma_cap->l3l4fnum) {
                priv->flow_entries_max = dma_cap->l3l4fnum;
@@ -250,10 +272,14 @@ static int tc_init(struct stmmac_priv *priv)
                for (i = 0; i < priv->flow_entries_max; i++)
                        priv->flow_entries[i].idx = i;
 
-               dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
+               dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
                         priv->flow_entries_max);
        }
 
+       ret = tc_rfs_init(priv);
+       if (ret)
+               return -ENOMEM;
+
        if (!priv->plat->fpe_cfg) {
                priv->plat->fpe_cfg = devm_kzalloc(priv->device,
                                                   sizeof(*priv->plat->fpe_cfg),
@@ -607,16 +633,45 @@ static int tc_del_flow(struct stmmac_priv *priv,
        return ret;
 }
 
+static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
+                                           struct flow_cls_offload *cls,
+                                           bool get_free)
+{
+       int i;
+
+       for (i = 0; i < priv->rfs_entries_total; i++) {
+               struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];
+
+               if (entry->cookie == cls->cookie)
+                       return entry;
+               if (get_free && entry->in_use == false)
+                       return entry;
+       }
+
+       return NULL;
+}
+
 #define VLAN_PRIO_FULL_MASK (0x07)
 
 static int tc_add_vlan_flow(struct stmmac_priv *priv,
                            struct flow_cls_offload *cls)
 {
+       struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
        struct flow_match_vlan match;
 
+       if (!entry) {
+               entry = tc_find_rfs(priv, cls, true);
+               if (!entry)
+                       return -ENOENT;
+       }
+
+       if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
+           priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
+               return -ENOENT;
+
        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
                return -EINVAL;
@@ -638,6 +693,12 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
 
                prio = BIT(match.key->vlan_priority);
                stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
+
+               entry->in_use = true;
+               entry->cookie = cls->cookie;
+               entry->tc = tc;
+               entry->type = STMMAC_RFS_T_VLAN;
+               priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
        }
 
        return 0;
@@ -646,20 +707,19 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
 static int tc_del_vlan_flow(struct stmmac_priv *priv,
                            struct flow_cls_offload *cls)
 {
-       struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
-       struct flow_dissector *dissector = rule->match.dissector;
-       int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+       struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
 
-       /* Nothing to do here */
-       if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
-               return -EINVAL;
+       if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
+               return -ENOENT;
 
-       if (tc < 0) {
-               netdev_err(priv->dev, "Invalid traffic class\n");
-               return -EINVAL;
-       }
+       stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);
+
+       entry->in_use = false;
+       entry->cookie = 0;
+       entry->tc = 0;
+       entry->type = 0;
 
-       stmmac_rx_queue_prio(priv, priv->hw, 0, tc);
+       priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;
 
        return 0;
 }
index c092cb6..ffbbda8 100644 (file)
@@ -1844,13 +1844,14 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                if (ret < 0) {
                        dev_err(dev, "%pOF error reading port_id %d\n",
                                port_np, ret);
-                       return ret;
+                       goto of_node_put;
                }
 
                if (!port_id || port_id > common->port_num) {
                        dev_err(dev, "%pOF has invalid port_id %u %s\n",
                                port_np, port_id, port_np->name);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto of_node_put;
                }
 
                port = am65_common_get_port(common, port_id);
@@ -1866,8 +1867,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                                (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
 
                port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
-               if (IS_ERR(port->slave.mac_sl))
-                       return PTR_ERR(port->slave.mac_sl);
+               if (IS_ERR(port->slave.mac_sl)) {
+                       ret = PTR_ERR(port->slave.mac_sl);
+                       goto of_node_put;
+               }
 
                port->disabled = !of_device_is_available(port_np);
                if (port->disabled) {
@@ -1880,7 +1883,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                        ret = PTR_ERR(port->slave.ifphy);
                        dev_err(dev, "%pOF error retrieving port phy: %d\n",
                                port_np, ret);
-                       return ret;
+                       goto of_node_put;
                }
 
                port->slave.mac_only =
@@ -1889,10 +1892,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                /* get phy/link info */
                if (of_phy_is_fixed_link(port_np)) {
                        ret = of_phy_register_fixed_link(port_np);
-                       if (ret)
-                               return dev_err_probe(dev, ret,
+                       if (ret) {
+                               ret = dev_err_probe(dev, ret,
                                                     "failed to register fixed-link phy %pOF\n",
                                                     port_np);
+                               goto of_node_put;
+                       }
                        port->slave.phy_node = of_node_get(port_np);
                } else {
                        port->slave.phy_node =
@@ -1902,14 +1907,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                if (!port->slave.phy_node) {
                        dev_err(dev,
                                "slave[%d] no phy found\n", port_id);
-                       return -ENODEV;
+                       ret = -ENODEV;
+                       goto of_node_put;
                }
 
                ret = of_get_phy_mode(port_np, &port->slave.phy_if);
                if (ret) {
                        dev_err(dev, "%pOF read phy-mode err %d\n",
                                port_np, ret);
-                       return ret;
+                       goto of_node_put;
                }
 
                ret = of_get_mac_address(port_np, port->slave.mac_addr);
@@ -1932,6 +1938,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
        }
 
        return 0;
+
+of_node_put:
+       of_node_put(port_np);
+       of_node_put(node);
+       return ret;
 }
 
 static void am65_cpsw_pcpu_stats_free(void *data)
index b06c17a..ebd2870 100644 (file)
@@ -1262,6 +1262,11 @@ static int fjes_probe(struct platform_device *plat_dev)
        hw->hw_res.start = res->start;
        hw->hw_res.size = resource_size(res);
        hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+       if (hw->hw_res.irq < 0) {
+               err = hw->hw_res.irq;
+               goto err_free_control_wq;
+       }
+
        err = fjes_hw_init(&adapter->hw);
        if (err)
                goto err_free_control_wq;
index 7da2bb8..edde9c3 100644 (file)
@@ -794,14 +794,14 @@ static void mkiss_close(struct tty_struct *tty)
         */
        netif_stop_queue(ax->dev);
 
-       ax->tty = NULL;
-
        unregister_netdev(ax->dev);
 
        /* Free all AX25 frame buffers after unreg. */
        kfree(ax->rbuff);
        kfree(ax->xbuff);
 
+       ax->tty = NULL;
+
        free_netdev(ax->dev);
 }
 
index 23ee0b1..2f5e7b3 100644 (file)
@@ -93,7 +93,9 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
 
        ret = usb_control_msg(usb_dev, pipe, request, requesttype,
                              value, index, data, size, timeout);
-       if (ret < 0) {
+       if (ret < size) {
+               ret = ret < 0 ? ret : -ENODATA;
+
                atusb->err = ret;
                dev_err(&usb_dev->dev,
                        "%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
@@ -861,9 +863,9 @@ static int atusb_get_and_show_build(struct atusb *atusb)
        if (!build)
                return -ENOMEM;
 
-       ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
-                               ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
-                               build, ATUSB_BUILD_SIZE, 1000);
+       /* We cannot call atusb_control_msg() here, since this request may read various length data */
+       ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
+                             ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
        if (ret >= 0) {
                build[ret] = 0;
                dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
index 90aafb5..a438202 100644 (file)
@@ -514,6 +514,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
                                goto err_free;
                        key = nmap->entry[i].key;
                        *key = i;
+                       memset(nmap->entry[i].value, 0, offmap->map.value_size);
                }
        }
 
index 0ab6a40..a6a713b 100644 (file)
@@ -77,7 +77,10 @@ static int nsim_set_ringparam(struct net_device *dev,
 {
        struct netdevsim *ns = netdev_priv(dev);
 
-       memcpy(&ns->ethtool.ring, ring, sizeof(ns->ethtool.ring));
+       ns->ethtool.ring.rx_pending = ring->rx_pending;
+       ns->ethtool.ring.rx_jumbo_pending = ring->rx_jumbo_pending;
+       ns->ethtool.ring.rx_mini_pending = ring->rx_mini_pending;
+       ns->ethtool.ring.tx_pending = ring->tx_pending;
        return 0;
 }
 
index c204067..c198722 100644 (file)
@@ -460,6 +460,9 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
 
                if (addr == mdiodev->addr) {
                        device_set_node(dev, of_fwnode_handle(child));
+                       /* The refcount on "child" is passed to the mdio
+                        * device. Do _not_ use of_node_put(child) here.
+                        */
                        return;
                }
        }
index 5904546..ea82ea5 100644 (file)
@@ -1388,6 +1388,7 @@ EXPORT_SYMBOL_GPL(phylink_stop);
  * @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
  *
  * Handle a network device suspend event. There are several cases:
+ *
  * - If Wake-on-Lan is not active, we can bring down the link between
  *   the MAC and PHY by calling phylink_stop().
  * - If Wake-on-Lan is active, and being handled only by the PHY, we
index 1572878..45a67e7 100644 (file)
@@ -209,6 +209,9 @@ struct tun_struct {
        struct tun_prog __rcu *steering_prog;
        struct tun_prog __rcu *filter_prog;
        struct ethtool_link_ksettings link_ksettings;
+       /* init args */
+       struct file *file;
+       struct ifreq *ifr;
 };
 
 struct veth {
@@ -216,6 +219,9 @@ struct veth {
        __be16 h_vlan_TCI;
 };
 
+static void tun_flow_init(struct tun_struct *tun);
+static void tun_flow_uninit(struct tun_struct *tun);
+
 static int tun_napi_receive(struct napi_struct *napi, int budget)
 {
        struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -953,6 +959,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 
 static const struct ethtool_ops tun_ethtool_ops;
 
+static int tun_net_init(struct net_device *dev)
+{
+       struct tun_struct *tun = netdev_priv(dev);
+       struct ifreq *ifr = tun->ifr;
+       int err;
+
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+
+       spin_lock_init(&tun->lock);
+
+       err = security_tun_dev_alloc_security(&tun->security);
+       if (err < 0) {
+               free_percpu(dev->tstats);
+               return err;
+       }
+
+       tun_flow_init(tun);
+
+       dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
+                          TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+                          NETIF_F_HW_VLAN_STAG_TX;
+       dev->features = dev->hw_features | NETIF_F_LLTX;
+       dev->vlan_features = dev->features &
+                            ~(NETIF_F_HW_VLAN_CTAG_TX |
+                              NETIF_F_HW_VLAN_STAG_TX);
+
+       tun->flags = (tun->flags & ~TUN_FEATURES) |
+                     (ifr->ifr_flags & TUN_FEATURES);
+
+       INIT_LIST_HEAD(&tun->disabled);
+       err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
+                        ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+       if (err < 0) {
+               tun_flow_uninit(tun);
+               security_tun_dev_free_security(tun->security);
+               free_percpu(dev->tstats);
+               return err;
+       }
+       return 0;
+}
+
 /* Net device detach from fd. */
 static void tun_net_uninit(struct net_device *dev)
 {
@@ -1169,6 +1218,7 @@ static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
 }
 
 static const struct net_device_ops tun_netdev_ops = {
+       .ndo_init               = tun_net_init,
        .ndo_uninit             = tun_net_uninit,
        .ndo_open               = tun_net_open,
        .ndo_stop               = tun_net_close,
@@ -1252,6 +1302,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
 }
 
 static const struct net_device_ops tap_netdev_ops = {
+       .ndo_init               = tun_net_init,
        .ndo_uninit             = tun_net_uninit,
        .ndo_open               = tun_net_open,
        .ndo_stop               = tun_net_close,
@@ -1292,7 +1343,7 @@ static void tun_flow_uninit(struct tun_struct *tun)
 #define MAX_MTU 65535
 
 /* Initialize net device. */
-static void tun_net_init(struct net_device *dev)
+static void tun_net_initialize(struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
 
@@ -2206,11 +2257,6 @@ static void tun_free_netdev(struct net_device *dev)
        BUG_ON(!(list_empty(&tun->disabled)));
 
        free_percpu(dev->tstats);
-       /* We clear tstats so that tun_set_iff() can tell if
-        * tun_free_netdev() has been called from register_netdevice().
-        */
-       dev->tstats = NULL;
-
        tun_flow_uninit(tun);
        security_tun_dev_free_security(tun->security);
        __tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2716,41 +2762,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                tun->rx_batched = 0;
                RCU_INIT_POINTER(tun->steering_prog, NULL);
 
-               dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-               if (!dev->tstats) {
-                       err = -ENOMEM;
-                       goto err_free_dev;
-               }
-
-               spin_lock_init(&tun->lock);
-
-               err = security_tun_dev_alloc_security(&tun->security);
-               if (err < 0)
-                       goto err_free_stat;
-
-               tun_net_init(dev);
-               tun_flow_init(tun);
+               tun->ifr = ifr;
+               tun->file = file;
 
-               dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
-                                  TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
-                                  NETIF_F_HW_VLAN_STAG_TX;
-               dev->features = dev->hw_features | NETIF_F_LLTX;
-               dev->vlan_features = dev->features &
-                                    ~(NETIF_F_HW_VLAN_CTAG_TX |
-                                      NETIF_F_HW_VLAN_STAG_TX);
-
-               tun->flags = (tun->flags & ~TUN_FEATURES) |
-                             (ifr->ifr_flags & TUN_FEATURES);
-
-               INIT_LIST_HEAD(&tun->disabled);
-               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
-                                ifr->ifr_flags & IFF_NAPI_FRAGS, false);
-               if (err < 0)
-                       goto err_free_flow;
+               tun_net_initialize(dev);
 
                err = register_netdevice(tun->dev);
-               if (err < 0)
-                       goto err_detach;
+               if (err < 0) {
+                       free_netdev(dev);
+                       return err;
+               }
                /* free_netdev() won't check refcnt, to avoid race
                 * with dev_put() we need publish tun after registration.
                 */
@@ -2767,24 +2788,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
        strcpy(ifr->ifr_name, tun->dev->name);
        return 0;
-
-err_detach:
-       tun_detach_all(dev);
-       /* We are here because register_netdevice() has failed.
-        * If register_netdevice() already called tun_free_netdev()
-        * while dealing with the error, dev->stats has been cleared.
-        */
-       if (!dev->tstats)
-               goto err_free_dev;
-
-err_free_flow:
-       tun_flow_uninit(tun);
-       security_tun_dev_free_security(tun->security);
-err_free_stat:
-       free_percpu(dev->tstats);
-err_free_dev:
-       free_netdev(dev);
-       return err;
 }
 
 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
index 42ba4af..7168297 100644 (file)
@@ -9,6 +9,8 @@
 
 #include "asix.h"
 
+#define AX_HOST_EN_RETRIES     30
+
 int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                  u16 size, void *data, int in_pm)
 {
@@ -68,7 +70,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
        int i, ret;
        u8 smsr;
 
-       for (i = 0; i < 30; ++i) {
+       for (i = 0; i < AX_HOST_EN_RETRIES; ++i) {
                ret = asix_set_sw_mii(dev, in_pm);
                if (ret == -ENODEV || ret == -ETIMEDOUT)
                        break;
@@ -77,13 +79,13 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
                                    0, 0, 1, &smsr, in_pm);
                if (ret == -ENODEV)
                        break;
-               else if (ret < 0)
+               else if (ret < sizeof(smsr))
                        continue;
                else if (smsr & AX_HOST_EN)
                        break;
        }
 
-       return ret;
+       return i >= AX_HOST_EN_RETRIES ? -ETIMEDOUT : ret;
 }
 
 static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
index 24753a4..e303b52 100644 (file)
@@ -181,6 +181,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
                min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
 
        max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
+       if (max == 0)
+               max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
 
        /* some devices set dwNtbOutMaxSize too low for the above default */
        min = min(min, max);
index 8cd265f..075f8ab 100644 (file)
@@ -76,6 +76,8 @@
 #define LAN7801_USB_PRODUCT_ID         (0x7801)
 #define LAN78XX_EEPROM_MAGIC           (0x78A5)
 #define LAN78XX_OTP_MAGIC              (0x78F3)
+#define AT29M2AF_USB_VENDOR_ID         (0x07C9)
+#define AT29M2AF_USB_PRODUCT_ID        (0x0012)
 
 #define        MII_READ                        1
 #define        MII_WRITE                       0
@@ -4734,6 +4736,10 @@ static const struct usb_device_id products[] = {
        /* LAN7801 USB Gigabit Ethernet Device */
        USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
        },
+       {
+       /* ATM2-AF USB Gigabit Ethernet Device */
+       USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
+       },
        {},
 };
 MODULE_DEVICE_TABLE(usb, products);
index c4cd40b..feb247e 100644 (file)
@@ -493,11 +493,11 @@ static void read_bulk_callback(struct urb *urb)
                goto goon;
 
        rx_status = buf[count - 2];
-       if (rx_status & 0x1e) {
+       if (rx_status & 0x1c) {
                netif_dbg(pegasus, rx_err, net,
                          "RX packet error %x\n", rx_status);
                net->stats.rx_errors++;
-               if (rx_status & 0x06)   /* long or runt */
+               if (rx_status & 0x04)   /* runt */
                        net->stats.rx_length_errors++;
                if (rx_status & 0x08)
                        net->stats.rx_crc_errors++;
index 86b814e..f510e82 100644 (file)
@@ -1358,6 +1358,7 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
        {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
index f9877a3..ef6010a 100644 (file)
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION                "12"
 
 /* Information for net */
-#define NET_VERSION            "11"
+#define NET_VERSION            "12"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -4016,6 +4016,11 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
        ocp_write_word(tp, type, PLA_BP_BA, 0);
 }
 
+static inline void rtl_reset_ocp_base(struct r8152 *tp)
+{
+       tp->ocp_base = -1;
+}
+
 static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
 {
        u16 data, check;
@@ -4087,8 +4092,6 @@ static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait)
 
        rtl_phy_patch_request(tp, false, wait);
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
-
        return 0;
 }
 
@@ -4800,6 +4803,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy,
        u32 len;
        u8 *data;
 
+       rtl_reset_ocp_base(tp);
+
        if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) {
                dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n");
                return;
@@ -4845,7 +4850,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy,
                }
        }
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
+       rtl_reset_ocp_base(tp);
+
        rtl_phy_patch_request(tp, false, wait);
 
        if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version))
@@ -4861,6 +4867,8 @@ static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver)
        ver_addr = __le16_to_cpu(phy_ver->ver.addr);
        ver = __le16_to_cpu(phy_ver->ver.data);
 
+       rtl_reset_ocp_base(tp);
+
        if (sram_read(tp, ver_addr) >= ver) {
                dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n");
                return 0;
@@ -4877,6 +4885,8 @@ static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix)
 {
        u16 addr, data;
 
+       rtl_reset_ocp_base(tp);
+
        addr = __le16_to_cpu(fix->setting.addr);
        data = ocp_reg_read(tp, addr);
 
@@ -4908,6 +4918,8 @@ static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *ph
        u32 length;
        int i, num;
 
+       rtl_reset_ocp_base(tp);
+
        num = phy->pre_num;
        for (i = 0; i < num; i++)
                sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr),
@@ -4938,6 +4950,8 @@ static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy)
        u32 length, i, num;
        __le16 *data;
 
+       rtl_reset_ocp_base(tp);
+
        mode_reg = __le16_to_cpu(phy->mode_reg);
        sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre));
        sram_write(tp, __le16_to_cpu(phy->ba_reg),
@@ -5107,6 +5121,7 @@ post_fw:
        if (rtl_fw->post_fw)
                rtl_fw->post_fw(tp);
 
+       rtl_reset_ocp_base(tp);
        strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE);
        dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
 }
@@ -6584,6 +6599,21 @@ static bool rtl8153_in_nway(struct r8152 *tp)
                return true;
 }
 
+static void r8156_mdio_force_mode(struct r8152 *tp)
+{
+       u16 data;
+
+       /* Select force mode through 0xa5b4 bit 15
+        * 0: MDIO force mode
+        * 1: MMD force mode
+        */
+       data = ocp_reg_read(tp, 0xa5b4);
+       if (data & BIT(15)) {
+               data &= ~BIT(15);
+               ocp_reg_write(tp, 0xa5b4, data);
+       }
+}
+
 static void set_carrier(struct r8152 *tp)
 {
        struct net_device *netdev = tp->netdev;
@@ -8016,6 +8046,7 @@ static void r8156_init(struct r8152 *tp)
        ocp_data |= ACT_ODMA;
        ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data);
 
+       r8156_mdio_force_mode(tp);
        rtl_tally_reset(tp);
 
        tp->coalesce = 15000;   /* 15 us */
@@ -8145,6 +8176,7 @@ static void r8156b_init(struct r8152 *tp)
        ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
        ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 
+       r8156_mdio_force_mode(tp);
        rtl_tally_reset(tp);
 
        tp->coalesce = 15000;   /* 15 us */
@@ -8467,6 +8499,8 @@ static int rtl8152_resume(struct usb_interface *intf)
 
        mutex_lock(&tp->control);
 
+       rtl_reset_ocp_base(tp);
+
        if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
                ret = rtl8152_runtime_resume(tp);
        else
@@ -8482,6 +8516,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
        struct r8152 *tp = usb_get_intfdata(intf);
 
        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+       rtl_reset_ocp_base(tp);
        tp->rtl_ops.init(tp);
        queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
        set_ethernet_addr(tp, true);
@@ -9603,9 +9638,12 @@ static int rtl8152_probe(struct usb_interface *intf,
                netdev->hw_features &= ~NETIF_F_RXCSUM;
        }
 
-       if (udev->parent &&
-                       le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) {
-               tp->lenovo_macpassthru = 1;
+       if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
+               switch (le16_to_cpu(udev->descriptor.idProduct)) {
+               case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
+               case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
+                       tp->lenovo_macpassthru = 1;
+               }
        }
 
        if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
index 4a84f90..247f58c 100644 (file)
@@ -608,6 +608,11 @@ static const struct usb_device_id  products [] = {
        USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042,
                                      USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
        .driver_info = (unsigned long) &rndis_poll_status_info,
+}, {
+       /* Hytera Communications DMR radios' "Radio to PC Network" */
+       USB_VENDOR_AND_INTERFACE_INFO(0x238b,
+                                     USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+       .driver_info = (unsigned long)&rndis_info,
 }, {
        /* RNDIS is MSFT's un-official variant of CDC ACM */
        USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
index 50eb43e..2acdb8a 100644 (file)
@@ -879,8 +879,12 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
 
                        stats->xdp_bytes += skb->len;
                        skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
-                       if (skb)
-                               napi_gro_receive(&rq->xdp_napi, skb);
+                       if (skb) {
+                               if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
+                                       netif_receive_skb(skb);
+                               else
+                                       napi_gro_receive(&rq->xdp_napi, skb);
+                       }
                }
                done++;
        }
index 55db6a3..b107835 100644 (file)
@@ -733,7 +733,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                pr_debug("%s: rx error: len %u exceeds max size %d\n",
                         dev->name, len, GOOD_PACKET_LEN);
                dev->stats.rx_length_errors++;
-               goto err_len;
+               goto err;
        }
 
        if (likely(!vi->xdp_enabled)) {
@@ -825,10 +825,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
 
 skip_xdp:
        skb = build_skb(buf, buflen);
-       if (!skb) {
-               put_page(page);
+       if (!skb)
                goto err;
-       }
        skb_reserve(skb, headroom - delta);
        skb_put(skb, len);
        if (!xdp_prog) {
@@ -839,13 +837,12 @@ skip_xdp:
        if (metasize)
                skb_metadata_set(skb, metasize);
 
-err:
        return skb;
 
 err_xdp:
        rcu_read_unlock();
        stats->xdp_drops++;
-err_len:
+err:
        stats->drops++;
        put_page(page);
 xdp_xmit:
index 14fae31..fd407c0 100644 (file)
@@ -3261,7 +3261,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
 
 #ifdef CONFIG_PCI_MSI
        if (adapter->intr.type == VMXNET3_IT_MSIX) {
-               int i, nvec;
+               int i, nvec, nvec_allocated;
 
                nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
                        1 : adapter->num_tx_queues;
@@ -3274,14 +3274,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
                for (i = 0; i < nvec; i++)
                        adapter->intr.msix_entries[i].entry = i;
 
-               nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
-               if (nvec < 0)
+               nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
+               if (nvec_allocated < 0)
                        goto msix_err;
 
                /* If we cannot allocate one MSIx vector per queue
                 * then limit the number of rx queues to 1
                 */
-               if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
+               if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
+                   nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
                        if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
                            || adapter->num_rx_queues != 1) {
                                adapter->share_intr = VMXNET3_INTR_TXSHARE;
@@ -3291,14 +3292,14 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
                        }
                }
 
-               adapter->intr.num_intrs = nvec;
+               adapter->intr.num_intrs = nvec_allocated;
                return;
 
 msix_err:
                /* If we cannot allocate MSIx vectors use only one rx queue */
                dev_info(&adapter->pdev->dev,
                         "Failed to enable MSI-X, error %d. "
-                        "Limiting #rx queues to 1, try MSI.\n", nvec);
+                        "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
 
                adapter->intr.type = VMXNET3_IT_MSI;
        }
index 131c745..b2242a0 100644 (file)
@@ -770,8 +770,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
 
        skb->dev = vrf_dev;
 
-       vrf_nf_set_untracked(skb);
-
        err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
                      skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
 
@@ -792,6 +790,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
        if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
                return skb;
 
+       vrf_nf_set_untracked(skb);
+
        if (qdisc_tx_is_default(vrf_dev) ||
            IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
                return vrf_ip6_out_direct(vrf_dev, sk, skb);
@@ -1000,8 +1000,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
 
        skb->dev = vrf_dev;
 
-       vrf_nf_set_untracked(skb);
-
        err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
                      skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
 
@@ -1023,6 +1021,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
            ipv4_is_lbcast(ip_hdr(skb)->daddr))
                return skb;
 
+       vrf_nf_set_untracked(skb);
+
        if (qdisc_tx_is_default(vrf_dev) ||
            IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
                return vrf_ip_out_direct(vrf_dev, sk, skb);
index 26c7ae2..49c0b1a 100644 (file)
@@ -533,7 +533,11 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
                ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
                break;
        case ATH11K_MHI_RESUME:
-               ret = mhi_pm_resume(ab_pci->mhi_ctrl);
+               /* Do force MHI resume as some devices like QCA6390, WCN6855
+                * are not in M3 state but they are functional. So just ignore
+                * the MHI state while resuming.
+                */
+               ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
                break;
        case ATH11K_MHI_TRIGGER_RDDM:
                ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
index 5bf2318..3a1a35b 100644 (file)
@@ -7,16 +7,20 @@ config BRCMSMAC
        depends on MAC80211
        depends on BCMA_POSSIBLE
        select BCMA
-       select NEW_LEDS if BCMA_DRIVER_GPIO
-       select LEDS_CLASS if BCMA_DRIVER_GPIO
        select BRCMUTIL
        select FW_LOADER
        select CORDIC
        help
          This module adds support for PCIe wireless adapters based on Broadcom
-         IEEE802.11n SoftMAC chipsets. It also has WLAN led support, which will
-         be available if you select BCMA_DRIVER_GPIO. If you choose to build a
-         module, the driver will be called brcmsmac.ko.
+         IEEE802.11n SoftMAC chipsets. If you choose to build a module, the
+         driver will be called brcmsmac.ko.
+
+config BRCMSMAC_LEDS
+       def_bool BRCMSMAC && BCMA_DRIVER_GPIO && MAC80211_LEDS
+       help
+         The brcmsmac LED support depends on the presence of the
+         BCMA_DRIVER_GPIO driver, and it only works if LED support
+         is enabled and reachable from the driver module.
 
 source "drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig"
 
index 482d773..0907577 100644 (file)
@@ -42,6 +42,6 @@ brcmsmac-y := \
        brcms_trace_events.o \
        debug.o
 
-brcmsmac-$(CONFIG_BCMA_DRIVER_GPIO) += led.o
+brcmsmac-$(CONFIG_BRCMSMAC_LEDS) += led.o
 
 obj-$(CONFIG_BRCMSMAC) += brcmsmac.o
index d65f5c2..2a5cbeb 100644 (file)
@@ -24,7 +24,7 @@ struct brcms_led {
        struct gpio_desc *gpiod;
 };
 
-#ifdef CONFIG_BCMA_DRIVER_GPIO
+#ifdef CONFIG_BRCMSMAC_LEDS
 void brcms_led_unregister(struct brcms_info *wl);
 int brcms_led_register(struct brcms_info *wl);
 #else
index 24fe3f6..7eacc8e 100644 (file)
@@ -2,14 +2,13 @@
 config IWLEGACY
        tristate
        select FW_LOADER
-       select NEW_LEDS
-       select LEDS_CLASS
        select LEDS_TRIGGERS
        select MAC80211_LEDS
 
 config IWL4965
        tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
        depends on PCI && MAC80211
+       depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
        select IWLEGACY
        help
          This option enables support for
@@ -38,6 +37,7 @@ config IWL4965
 config IWL3945
        tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
        depends on PCI && MAC80211
+       depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
        select IWLEGACY
        help
          Select to build the driver supporting the:
index 1085afb..418ae4f 100644 (file)
@@ -47,7 +47,7 @@ if IWLWIFI
 
 config IWLWIFI_LEDS
        bool
-       depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
+       depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
        depends on IWLMVM || IWLDVM
        select LEDS_TRIGGERS
        select MAC80211_LEDS
index bdd4ee4..76e0b7b 100644 (file)
@@ -269,17 +269,18 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
        u8 rate_plcp;
        u32 rate_flags = 0;
        bool is_cck;
-       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
        /* info->control is only relevant for non HW rate control */
        if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
+               struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
                /* HT rate doesn't make sense for a non data frame */
                WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
                          !ieee80211_is_data(fc),
                          "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
                          info->control.rates[0].flags,
                          info->control.rates[0].idx,
-                         le16_to_cpu(fc), mvmsta->sta_state);
+                         le16_to_cpu(fc), sta ? mvmsta->sta_state : -1);
 
                rate_idx = info->control.rates[0].idx;
        }
index 79ab850..c78ae4b 100644 (file)
@@ -34,4 +34,4 @@ obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
 obj-$(CONFIG_MT7603E) += mt7603/
 obj-$(CONFIG_MT7615_COMMON) += mt7615/
 obj-$(CONFIG_MT7915E) += mt7915/
-obj-$(CONFIG_MT7921E) += mt7921/
+obj-$(CONFIG_MT7921_COMMON) += mt7921/
index cff3b43..12c03da 100644 (file)
@@ -181,9 +181,9 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
 bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
 {
        struct ipc_mem_channel *channel;
+       bool hpda_ctrl_pending = false;
        struct sk_buff_head *ul_list;
        bool hpda_pending = false;
-       bool forced_hpdu = false;
        struct ipc_pipe *pipe;
        int i;
 
@@ -200,15 +200,19 @@ bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
                ul_list = &channel->ul_list;
 
                /* Fill the transfer descriptor with the uplink buffer info. */
-               hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+               if (!ipc_imem_check_wwan_ips(channel)) {
+                       hpda_ctrl_pending |=
+                               ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
                                                        pipe, ul_list);
-
-               /* forced HP update needed for non data channels */
-               if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
-                       forced_hpdu = true;
+               } else {
+                       hpda_pending |=
+                               ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+                                                       pipe, ul_list);
+               }
        }
 
-       if (forced_hpdu) {
+       /* forced HP update needed for non data channels */
+       if (hpda_ctrl_pending) {
                hpda_pending = false;
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_UL_WRITE_TD);
@@ -527,6 +531,9 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
                return;
        }
 
+       if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
+               ipc_devlink_deinit(ipc_imem->ipc_devlink);
+
        if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
                ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
 
@@ -1167,7 +1174,7 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
                ipc_port_deinit(ipc_imem->ipc_port);
        }
 
-       if (ipc_imem->ipc_devlink)
+       if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
                ipc_devlink_deinit(ipc_imem->ipc_devlink);
 
        ipc_imem_device_ipc_uninit(ipc_imem);
@@ -1263,7 +1270,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
 
        ipc_imem->pci_device_id = device_id;
 
-       ipc_imem->ev_cdev_write_pending = false;
        ipc_imem->cp_version = 0;
        ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
 
@@ -1331,6 +1337,8 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
 
                if (ipc_flash_link_establish(ipc_imem))
                        goto devlink_channel_fail;
+
+               set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
        }
        return ipc_imem;
 devlink_channel_fail:
index 6be6708..6b8a837 100644 (file)
@@ -101,6 +101,7 @@ struct ipc_chnl_cfg;
 #define IOSM_CHIP_INFO_SIZE_MAX 100
 
 #define FULLY_FUNCTIONAL 0
+#define IOSM_DEVLINK_INIT 1
 
 /* List of the supported UL/DL pipes. */
 enum ipc_mem_pipes {
@@ -335,8 +336,6 @@ enum ipc_phase {
  *                             process the irq actions.
  * @flag:                      Flag to monitor the state of driver
  * @td_update_timer_suspended: if true then td update timer suspend
- * @ev_cdev_write_pending:     0 means inform the IPC tasklet to pass
- *                             the accumulated uplink buffers to CP.
  * @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
  * @reset_det_n:               Reset detect flag
  * @pcie_wake_n:               Pcie wake flag
@@ -374,7 +373,6 @@ struct iosm_imem {
        u8 ev_irq_pending[IPC_IRQ_VECTORS];
        unsigned long flag;
        u8 td_update_timer_suspended:1,
-          ev_cdev_write_pending:1,
           ev_mux_net_transmit_pending:1,
           reset_det_n:1,
           pcie_wake_n:1;
index 825e8e5..831cdae 100644 (file)
@@ -41,7 +41,6 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
 static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
                                  void *msg, size_t size)
 {
-       ipc_imem->ev_cdev_write_pending = false;
        ipc_imem_ul_send(ipc_imem);
 
        return 0;
@@ -50,11 +49,6 @@ static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
 /* Through tasklet to do sio write. */
 static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
 {
-       if (ipc_imem->ev_cdev_write_pending)
-               return -1;
-
-       ipc_imem->ev_cdev_write_pending = true;
-
        return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
                                        NULL, 0, false);
 }
@@ -450,6 +444,7 @@ void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
        /* Release the pipe resources */
        ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
        ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+       ipc_imem->nr_of_channels--;
 }
 
 void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
index 4a16d6e..d9dea48 100644 (file)
@@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        unsigned int rx_queue_max;
        unsigned int rx_queue_len;
        unsigned long last_rx_time;
+       unsigned int rx_slots_needed;
        bool stalled;
 
        struct xenvif_copy_state rx_copy;
index accc991..dbac4c0 100644 (file)
 #include <xen/xen.h>
 #include <xen/events.h>
 
-static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+/*
+ * Update the needed ring page slots for the first SKB queued.
+ * Note that any call sequence outside the RX thread calling this function
+ * needs to wake up the RX thread via a call of xenvif_kick_thread()
+ * afterwards in order to avoid a race with putting the thread to sleep.
+ */
+static void xenvif_update_needed_slots(struct xenvif_queue *queue,
+                                      const struct sk_buff *skb)
 {
-       RING_IDX prod, cons;
-       struct sk_buff *skb;
-       int needed;
-       unsigned long flags;
-
-       spin_lock_irqsave(&queue->rx_queue.lock, flags);
+       unsigned int needed = 0;
 
-       skb = skb_peek(&queue->rx_queue);
-       if (!skb) {
-               spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
-               return false;
+       if (skb) {
+               needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+               if (skb_is_gso(skb))
+                       needed++;
+               if (skb->sw_hash)
+                       needed++;
        }
 
-       needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
-       if (skb_is_gso(skb))
-               needed++;
-       if (skb->sw_hash)
-               needed++;
+       WRITE_ONCE(queue->rx_slots_needed, needed);
+}
 
-       spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+{
+       RING_IDX prod, cons;
+       unsigned int needed;
+
+       needed = READ_ONCE(queue->rx_slots_needed);
+       if (!needed)
+               return false;
 
        do {
                prod = queue->rx.sring->req_prod;
@@ -80,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 
        spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
-       __skb_queue_tail(&queue->rx_queue, skb);
-
-       queue->rx_queue_len += skb->len;
-       if (queue->rx_queue_len > queue->rx_queue_max) {
+       if (queue->rx_queue_len >= queue->rx_queue_max) {
                struct net_device *dev = queue->vif->dev;
 
                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+               kfree_skb(skb);
+               queue->vif->dev->stats.rx_dropped++;
+       } else {
+               if (skb_queue_empty(&queue->rx_queue))
+                       xenvif_update_needed_slots(queue, skb);
+
+               __skb_queue_tail(&queue->rx_queue, skb);
+
+               queue->rx_queue_len += skb->len;
        }
 
        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
@@ -100,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
 
        skb = __skb_dequeue(&queue->rx_queue);
        if (skb) {
+               xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
+
                queue->rx_queue_len -= skb->len;
                if (queue->rx_queue_len < queue->rx_queue_max) {
                        struct netdev_queue *txq;
@@ -134,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
                        break;
                xenvif_rx_dequeue(queue);
                kfree_skb(skb);
+               queue->vif->dev->stats.rx_dropped++;
        }
 }
 
@@ -487,27 +504,31 @@ void xenvif_rx_action(struct xenvif_queue *queue)
        xenvif_rx_copy_flush(queue);
 }
 
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
 {
        RING_IDX prod, cons;
 
        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;
 
+       return prod - cons;
+}
+
+static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
+{
+       unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+
        return !queue->stalled &&
-               prod - cons < 1 &&
+               xenvif_rx_queue_slots(queue) < needed &&
                time_after(jiffies,
                           queue->last_rx_time + queue->vif->stall_timeout);
 }
 
 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 {
-       RING_IDX prod, cons;
-
-       prod = queue->rx.sring->req_prod;
-       cons = queue->rx.req_cons;
+       unsigned int needed = READ_ONCE(queue->rx_slots_needed);
 
-       return queue->stalled && prod - cons >= 1;
+       return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
 }
 
 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
index 911f439..d514d96 100644 (file)
@@ -148,6 +148,9 @@ struct netfront_queue {
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 
+       unsigned int rx_rsp_unconsumed;
+       spinlock_t rx_cons_lock;
+
        struct page_pool *page_pool;
        struct xdp_rxq_info xdp_rxq;
 };
@@ -376,12 +379,13 @@ static int xennet_open(struct net_device *dev)
        return 0;
 }
 
-static void xennet_tx_buf_gc(struct netfront_queue *queue)
+static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 {
        RING_IDX cons, prod;
        unsigned short id;
        struct sk_buff *skb;
        bool more_to_do;
+       bool work_done = false;
        const struct device *dev = &queue->info->netdev->dev;
 
        BUG_ON(!netif_carrier_ok(queue->info->netdev));
@@ -398,6 +402,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
                for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
                        struct xen_netif_tx_response txrsp;
 
+                       work_done = true;
+
                        RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
                        if (txrsp.status == XEN_NETIF_RSP_NULL)
                                continue;
@@ -441,11 +447,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 
        xennet_maybe_wake_tx(queue);
 
-       return;
+       return work_done;
 
  err:
        queue->info->broken = true;
        dev_alert(dev, "Disabled for further use\n");
+
+       return work_done;
 }
 
 struct xennet_gnttab_make_txreq {
@@ -834,6 +842,16 @@ static int xennet_close(struct net_device *dev)
        return 0;
 }
 
+static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&queue->rx_cons_lock, flags);
+       queue->rx.rsp_cons = val;
+       queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+       spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+}
+
 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
                                grant_ref_t ref)
 {
@@ -885,7 +903,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
                xennet_move_rx_slot(queue, skb, ref);
        } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
-       queue->rx.rsp_cons = cons;
+       xennet_set_rx_rsp_cons(queue, cons);
        return err;
 }
 
@@ -1039,7 +1057,7 @@ next:
        }
 
        if (unlikely(err))
-               queue->rx.rsp_cons = cons + slots;
+               xennet_set_rx_rsp_cons(queue, cons + slots);
 
        return err;
 }
@@ -1093,7 +1111,8 @@ static int xennet_fill_frags(struct netfront_queue *queue,
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-                       queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+                       xennet_set_rx_rsp_cons(queue,
+                                              ++cons + skb_queue_len(list));
                        kfree_skb(nskb);
                        return -ENOENT;
                }
@@ -1106,7 +1125,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
                kfree_skb(nskb);
        }
 
-       queue->rx.rsp_cons = cons;
+       xennet_set_rx_rsp_cons(queue, cons);
 
        return 0;
 }
@@ -1229,7 +1248,9 @@ err:
 
                        if (unlikely(xennet_set_skb_gso(skb, gso))) {
                                __skb_queue_head(&tmpq, skb);
-                               queue->rx.rsp_cons += skb_queue_len(&tmpq);
+                               xennet_set_rx_rsp_cons(queue,
+                                                      queue->rx.rsp_cons +
+                                                      skb_queue_len(&tmpq));
                                goto err;
                        }
                }
@@ -1253,7 +1274,8 @@ err:
 
                __skb_queue_tail(&rxq, skb);
 
-               i = ++queue->rx.rsp_cons;
+               i = queue->rx.rsp_cons + 1;
+               xennet_set_rx_rsp_cons(queue, i);
                work_done++;
        }
        if (need_xdp_flush)
@@ -1417,40 +1439,79 @@ static int xennet_set_features(struct net_device *dev,
        return 0;
 }
 
-static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
 {
-       struct netfront_queue *queue = dev_id;
        unsigned long flags;
 
-       if (queue->info->broken)
-               return IRQ_HANDLED;
+       if (unlikely(queue->info->broken))
+               return false;
 
        spin_lock_irqsave(&queue->tx_lock, flags);
-       xennet_tx_buf_gc(queue);
+       if (xennet_tx_buf_gc(queue))
+               *eoi = 0;
        spin_unlock_irqrestore(&queue->tx_lock, flags);
 
+       return true;
+}
+
+static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+{
+       unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+       if (likely(xennet_handle_tx(dev_id, &eoiflag)))
+               xen_irq_lateeoi(irq, eoiflag);
+
        return IRQ_HANDLED;
 }
 
-static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
 {
-       struct netfront_queue *queue = dev_id;
-       struct net_device *dev = queue->info->netdev;
+       unsigned int work_queued;
+       unsigned long flags;
 
-       if (queue->info->broken)
-               return IRQ_HANDLED;
+       if (unlikely(queue->info->broken))
+               return false;
+
+       spin_lock_irqsave(&queue->rx_cons_lock, flags);
+       work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+       if (work_queued > queue->rx_rsp_unconsumed) {
+               queue->rx_rsp_unconsumed = work_queued;
+               *eoi = 0;
+       } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
+               const struct device *dev = &queue->info->netdev->dev;
+
+               spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+               dev_alert(dev, "RX producer index going backwards\n");
+               dev_alert(dev, "Disabled for further use\n");
+               queue->info->broken = true;
+               return false;
+       }
+       spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
 
-       if (likely(netif_carrier_ok(dev) &&
-                  RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+       if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
                napi_schedule(&queue->napi);
 
+       return true;
+}
+
+static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+{
+       unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+       if (likely(xennet_handle_rx(dev_id, &eoiflag)))
+               xen_irq_lateeoi(irq, eoiflag);
+
        return IRQ_HANDLED;
 }
 
 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 {
-       xennet_tx_interrupt(irq, dev_id);
-       xennet_rx_interrupt(irq, dev_id);
+       unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+       if (xennet_handle_tx(dev_id, &eoiflag) &&
+           xennet_handle_rx(dev_id, &eoiflag))
+               xen_irq_lateeoi(irq, eoiflag);
+
        return IRQ_HANDLED;
 }
 
@@ -1768,9 +1829,10 @@ static int setup_netfront_single(struct netfront_queue *queue)
        if (err < 0)
                goto fail;
 
-       err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
-                                       xennet_interrupt,
-                                       0, queue->info->netdev->name, queue);
+       err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+                                               xennet_interrupt, 0,
+                                               queue->info->netdev->name,
+                                               queue);
        if (err < 0)
                goto bind_fail;
        queue->rx_evtchn = queue->tx_evtchn;
@@ -1798,18 +1860,18 @@ static int setup_netfront_split(struct netfront_queue *queue)
 
        snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                 "%s-tx", queue->name);
-       err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
-                                       xennet_tx_interrupt,
-                                       0, queue->tx_irq_name, queue);
+       err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+                                               xennet_tx_interrupt, 0,
+                                               queue->tx_irq_name, queue);
        if (err < 0)
                goto bind_tx_fail;
        queue->tx_irq = err;
 
        snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                 "%s-rx", queue->name);
-       err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
-                                       xennet_rx_interrupt,
-                                       0, queue->rx_irq_name, queue);
+       err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
+                                               xennet_rx_interrupt, 0,
+                                               queue->rx_irq_name, queue);
        if (err < 0)
                goto bind_rx_fail;
        queue->rx_irq = err;
@@ -1911,6 +1973,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 
        spin_lock_init(&queue->tx_lock);
        spin_lock_init(&queue->rx_lock);
+       spin_lock_init(&queue->rx_cons_lock);
 
        timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
 
index f126ce9..35b32fb 100644 (file)
@@ -524,7 +524,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
        phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
        if (IS_ERR(phy->gpiod_ena)) {
                nfc_err(dev, "Unable to get ENABLE GPIO\n");
-               return PTR_ERR(phy->gpiod_ena);
+               r = PTR_ERR(phy->gpiod_ena);
+               goto out_free;
        }
 
        phy->se_status.is_ese_present =
@@ -535,7 +536,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
        r = st21nfca_hci_platform_init(phy);
        if (r < 0) {
                nfc_err(&client->dev, "Unable to reboot st21nfca\n");
-               return r;
+               goto out_free;
        }
 
        r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
@@ -544,15 +545,23 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
                                ST21NFCA_HCI_DRIVER_NAME, phy);
        if (r < 0) {
                nfc_err(&client->dev, "Unable to register IRQ handler\n");
-               return r;
+               goto out_free;
        }
 
-       return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
-                                       ST21NFCA_FRAME_HEADROOM,
-                                       ST21NFCA_FRAME_TAILROOM,
-                                       ST21NFCA_HCI_LLC_MAX_PAYLOAD,
-                                       &phy->hdev,
-                                       &phy->se_status);
+       r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+                              ST21NFCA_FRAME_HEADROOM,
+                              ST21NFCA_FRAME_TAILROOM,
+                              ST21NFCA_HCI_LLC_MAX_PAYLOAD,
+                              &phy->hdev,
+                              &phy->se_status);
+       if (r)
+               goto out_free;
+
+       return 0;
+
+out_free:
+       kfree_skb(phy->pending_skb);
+       return r;
 }
 
 static int st21nfca_hci_i2c_remove(struct i2c_client *client)
@@ -563,6 +572,8 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
 
        if (phy->powered)
                st21nfca_hci_i2c_disable(phy);
+       if (phy->pending_skb)
+               kfree_skb(phy->pending_skb);
 
        return 0;
 }
index 4c63564..1af8a45 100644 (file)
@@ -666,6 +666,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
                struct request *rq)
 {
        if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+           ctrl->state != NVME_CTRL_DELETING &&
            ctrl->state != NVME_CTRL_DEAD &&
            !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
            !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
@@ -1749,9 +1750,20 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
                 */
                if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
                        return -EINVAL;
-               if (ctrl->max_integrity_segments)
-                       ns->features |=
-                               (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+
+               ns->features |= NVME_NS_EXT_LBAS;
+
+               /*
+                * The current fabrics transport drivers support namespace
+                * metadata formats only if nvme_ns_has_pi() returns true.
+                * Suppress support for all other formats so the namespace will
+                * have a 0 capacity and not be usable through the block stack.
+                *
+                * Note, this check will need to be modified if any drivers
+                * gain the ability to use other metadata formats.
+                */
+               if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+                       ns->features |= NVME_NS_METADATA_SUPPORTED;
        } else {
                /*
                 * For PCIe controllers, we can't easily remap the separate
@@ -2696,8 +2708,9 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
 
                if (tmp->cntlid == ctrl->cntlid) {
                        dev_err(ctrl->device,
-                               "Duplicate cntlid %u with %s, rejecting\n",
-                               ctrl->cntlid, dev_name(tmp->device));
+                               "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
+                               ctrl->cntlid, dev_name(tmp->device),
+                               subsys->subnqn);
                        return false;
                }
 
index 7f2071f..13e5d50 100644 (file)
@@ -866,7 +866,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
        }
        if (ana_log_size > ctrl->ana_log_size) {
                nvme_mpath_stop(ctrl);
-               kfree(ctrl->ana_log_buf);
+               nvme_mpath_uninit(ctrl);
                ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
                if (!ctrl->ana_log_buf)
                        return -ENOMEM;
@@ -886,4 +886,5 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
        kfree(ctrl->ana_log_buf);
        ctrl->ana_log_buf = NULL;
+       ctrl->ana_log_size = 0;
 }
index b334af8..9b095ee 100644 (file)
@@ -709,7 +709,7 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                return true;
        if (ctrl->ops->flags & NVME_F_FABRICS &&
            ctrl->state == NVME_CTRL_DELETING)
-               return true;
+               return queue_live;
        return __nvme_check_ready(ctrl, rq, queue_live);
 }
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
index bfc259e..9f81beb 100644 (file)
@@ -166,7 +166,10 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
        zone.len = ns->zsze;
        zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
        zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
-       zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+       if (zone.cond == BLK_ZONE_COND_FULL)
+               zone.wp = zone.start + zone.len;
+       else
+               zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
 
        return cb(&zone, idx, data);
 }
index cb6a473..7c1c43c 100644 (file)
@@ -922,7 +922,14 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
        size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
        int ret;
 
-       if (!nvme_is_write(cmd->req.cmd) ||
+       /*
+        * This command has not been processed yet, hence we are trying to
+        * figure out if there is still pending data left to receive. If
+        * we don't, we can simply prepare for the next pdu and bail out,
+        * otherwise we will need to prepare a buffer and receive the
+        * stale data before continuing forward.
+        */
+       if (!nvme_is_write(cmd->req.cmd) || !data_len ||
            data_len > cmd->req.port->inline_data_size) {
                nvmet_prepare_receive_pdu(queue);
                return;
index b10f015..2b07677 100644 (file)
@@ -76,6 +76,26 @@ struct device_node *of_irq_find_parent(struct device_node *child)
 }
 EXPORT_SYMBOL_GPL(of_irq_find_parent);
 
+/*
+ * These interrupt controllers abuse interrupt-map for unspeakable
+ * reasons and rely on the core code to *ignore* it (the drivers do
+ * their own parsing of the property).
+ *
+ * If you think of adding to the list for something *new*, think
+ * again. There is a high chance that you will be sent back to the
+ * drawing board.
+ */
+static const char * const of_irq_imap_abusers[] = {
+       "CBEA,platform-spider-pic",
+       "sti,platform-spider-pic",
+       "realtek,rtl-intc",
+       "fsl,ls1021a-extirq",
+       "fsl,ls1043a-extirq",
+       "fsl,ls1088a-extirq",
+       "renesas,rza1-irqc",
+       NULL,
+};
+
 /**
  * of_irq_parse_raw - Low level interrupt tree parsing
  * @addr:      address specifier (start of "reg" property of the device) in be32 format
@@ -159,12 +179,15 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
                /*
                 * Now check if cursor is an interrupt-controller and
                 * if it is then we are done, unless there is an
-                * interrupt-map which takes precedence.
+                * interrupt-map which takes precedence except on one
+                * of these broken platforms that want to parse
+                * interrupt-map themselves for $reason.
                 */
                bool intc = of_property_read_bool(ipar, "interrupt-controller");
 
                imap = of_get_property(ipar, "interrupt-map", &imaplen);
-               if (imap == NULL && intc) {
+               if (intc &&
+                   (!imap || of_device_compatible_match(ipar, of_irq_imap_abusers))) {
                        pr_debug(" -> got it !\n");
                        return 0;
                }
index 93b1411..7fc5135 100644 (file)
@@ -332,8 +332,8 @@ config PCIE_APPLE
          If unsure, say Y if you have an Apple Silicon system.
 
 config PCIE_MT7621
-       tristate "MediaTek MT7621 PCIe Controller"
-       depends on (RALINK && SOC_MT7621) || (MIPS && COMPILE_TEST)
+       bool "MediaTek MT7621 PCIe Controller"
+       depends on SOC_MT7621 || (MIPS && COMPILE_TEST)
        select PHY_MT7621_PCI
        default SOC_MT7621
        help
index c24dab3..722dacd 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/regulator/consumer.h>
+#include <linux/module.h>
 
 #include "pcie-designware.h"
 
index 7b17da2..cfe66bf 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/pm_domain.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
+#include <linux/module.h>
 
 #include "pcie-designware.h"
 
index c5300d4..c3b725a 100644 (file)
@@ -32,7 +32,6 @@
 #define PCIE_CORE_DEV_ID_REG                                   0x0
 #define PCIE_CORE_CMD_STATUS_REG                               0x4
 #define PCIE_CORE_DEV_REV_REG                                  0x8
-#define PCIE_CORE_EXP_ROM_BAR_REG                              0x30
 #define PCIE_CORE_PCIEXP_CAP                                   0xc0
 #define PCIE_CORE_ERR_CAPCTL_REG                               0x118
 #define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX                   BIT(5)
@@ -774,10 +773,6 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
                *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
                return PCI_BRIDGE_EMUL_HANDLED;
 
-       case PCI_ROM_ADDRESS1:
-               *value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG);
-               return PCI_BRIDGE_EMUL_HANDLED;
-
        case PCI_INTERRUPT_LINE: {
                /*
                 * From the whole 32bit register we support reading from HW only
@@ -810,10 +805,6 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
                advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
                break;
 
-       case PCI_ROM_ADDRESS1:
-               advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG);
-               break;
-
        case PCI_INTERRUPT_LINE:
                if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
                        u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
index 1bf4d75..b090924 100644 (file)
@@ -516,7 +516,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
        int ret, i;
 
        reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
-                                      GPIOD_OUT_LOW, "#PERST");
+                                      GPIOD_OUT_LOW, "PERST#");
        if (IS_ERR(reset))
                return PTR_ERR(reset);
 
@@ -539,12 +539,22 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
 
        rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
 
+       /* Assert PERST# before setting up the clock */
+       gpiod_set_value(reset, 1);
+
        ret = apple_pcie_setup_refclk(pcie, port);
        if (ret < 0)
                return ret;
 
+       /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
+       usleep_range(100, 200);
+
+       /* Deassert PERST# */
        rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
-       gpiod_set_value(reset, 1);
+       gpiod_set_value(reset, 0);
+
+       /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+       msleep(100);
 
        ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
                                         stat & PORT_STATUS_READY, 100, 250000);
index 48e3f4e..d84cf30 100644 (file)
@@ -722,9 +722,6 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
                goto out_disable;
        }
 
-       /* Ensure that all table entries are masked. */
-       msix_mask_all(base, tsize);
-
        ret = msix_setup_entries(dev, base, entries, nvec, affd);
        if (ret)
                goto out_disable;
@@ -751,6 +748,16 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
        /* Set MSI-X enabled bits and unmask the function */
        pci_intx_for_msi(dev, 0);
        dev->msix_enabled = 1;
+
+       /*
+        * Ensure that all table entries are masked to prevent
+        * stale entries from firing in a crash kernel.
+        *
+        * Done late to deal with a broken Marvell NVME device
+        * which takes the MSI-X mask bits into account even
+        * when MSI-X is disabled, which prevents MSI delivery.
+        */
+       msix_mask_all(base, tsize);
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 
        pcibios_free_irq(dev);
@@ -777,7 +784,7 @@ out_free:
        free_msi_irqs(dev);
 
 out_disable:
-       pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+       pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
 
        return ret;
 }
index c64c667..0ac9634 100644 (file)
@@ -757,8 +757,8 @@ static int hi3670_pcie_phy_get_resources(struct hi3670_pcie_phy *phy,
                return PTR_ERR(phy->sysctrl);
 
        phy->pmctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3670-pmctrl");
-       if (IS_ERR(phy->sysctrl))
-               return PTR_ERR(phy->sysctrl);
+       if (IS_ERR(phy->pmctrl))
+               return PTR_ERR(phy->pmctrl);
 
        /* clocks */
        phy->phy_ref_clk = devm_clk_get(dev, "phy_ref");
index 08d178a..aa27c79 100644 (file)
@@ -82,9 +82,9 @@
  * struct mvebu_cp110_utmi - PHY driver data
  *
  * @regs: PHY registers
- * @syscom: Regmap with system controller registers
+ * @syscon: Regmap with system controller registers
  * @dev: device driver handle
- * @caps: PHY capabilities
+ * @ops: phy ops
  */
 struct mvebu_cp110_utmi {
        void __iomem *regs;
index bfff0c8..fec1da4 100644 (file)
@@ -127,12 +127,13 @@ struct phy_drvdata {
 };
 
 /**
- * Write register and read back masked value to confirm it is written
+ * usb_phy_write_readback() - Write register and read back masked value to
+ * confirm it is written
  *
- * @base - QCOM DWC3 PHY base virtual address.
- * @offset - register offset.
- * @mask - register bitmask specifying what should be updated
- * @val - value to write.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @offset: register offset.
+ * @mask: register bitmask specifying what should be updated
+ * @val: value to write.
  */
 static inline void usb_phy_write_readback(struct usb_phy *phy_dwc3,
                                          u32 offset,
@@ -171,11 +172,11 @@ static int wait_for_latch(void __iomem *addr)
 }
 
 /**
- * Write SSPHY register
+ * usb_ss_write_phycreg() - Write SSPHY register
  *
- * @base - QCOM DWC3 PHY base virtual address.
- * @addr - SSPHY address to write.
- * @val - value to write.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @addr: SSPHY address to write.
+ * @val: value to write.
  */
 static int usb_ss_write_phycreg(struct usb_phy *phy_dwc3,
                                u32 addr, u32 val)
@@ -209,10 +210,11 @@ err_wait:
 }
 
 /**
- * Read SSPHY register.
+ * usb_ss_read_phycreg() - Read SSPHY register.
  *
- * @base - QCOM DWC3 PHY base virtual address.
- * @addr - SSPHY address to read.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @addr: SSPHY address to read.
+ * @val: pointer in which read is store.
  */
 static int usb_ss_read_phycreg(struct usb_phy *phy_dwc3,
                               u32 addr, u32 *val)
index 456a59d..c96639d 100644 (file)
@@ -2973,6 +2973,9 @@ struct qmp_phy_combo_cfg {
  * @qmp: QMP phy to which this lane belongs
  * @lane_rst: lane's reset controller
  * @mode: current PHY mode
+ * @dp_aux_cfg: Display port aux config
+ * @dp_opts: Display port optional config
+ * @dp_clks: Display port clocks
  */
 struct qmp_phy {
        struct phy *phy;
index 04d18d5..716a777 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * Copyright (C) 2016 Linaro Ltd
  */
 #include <linux/module.h>
index 7df6a63..e4f4a9b 100644 (file)
@@ -478,7 +478,7 @@ static void stm32_usbphyc_phy_tuning(struct stm32_usbphyc *usbphyc,
        if (!of_property_read_bool(np, "st,no-lsfs-fb-cap"))
                usbphyc_phy->tune |= LFSCAPEN;
 
-       if (of_property_read_bool(np, "st,slow-hs-slew-rate"))
+       if (of_property_read_bool(np, "st,decrease-hs-slew-rate"))
                usbphyc_phy->tune |= HSDRVSLEW;
 
        ret = of_property_read_u32(np, "st,tune-hs-dc-level", &val);
index 2ff56ce..c1211c4 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * PCIe SERDES driver for AM654x SoC
  *
  * Copyright (C) 2018 - 2019 Texas Instruments Incorporated - http://www.ti.com/
index 126f5b8..b3384c3 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * Wrapper driver for SERDES used in J721E
  *
  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
index ebceb15..3a505fe 100644 (file)
@@ -89,9 +89,9 @@ static inline void omap_usb_writel(void __iomem *addr, unsigned int offset,
 }
 
 /**
- * omap_usb2_set_comparator - links the comparator present in the system with
- *     this phy
- * @comparator - the companion phy(comparator) for this phy
+ * omap_usb2_set_comparator() - links the comparator present in the system with this phy
+ *
+ * @comparator the companion phy(comparator) for this phy
  *
  * The phy companion driver should call this API passing the phy_companion
  * filled with set_vbus and start_srp to be used by usb phy.
index a63213f..15c1c79 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * tusb1210.c - TUSB1210 USB ULPI PHY driver
  *
  * Copyright (C) 2015 Intel Corporation
index 2abcc6c..b607d10 100644 (file)
@@ -1244,6 +1244,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
                raw_spin_lock_init(&pc->irq_lock[i]);
        }
 
+       pc->pctl_desc = *pdata->pctl_desc;
+       pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
+       if (IS_ERR(pc->pctl_dev)) {
+               gpiochip_remove(&pc->gpio_chip);
+               return PTR_ERR(pc->pctl_dev);
+       }
+
+       pc->gpio_range = *pdata->gpio_range;
+       pc->gpio_range.base = pc->gpio_chip.base;
+       pc->gpio_range.gc = &pc->gpio_chip;
+       pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
+
        girq = &pc->gpio_chip.irq;
        girq->chip = &bcm2835_gpio_irq_chip;
        girq->parent_handler = bcm2835_gpio_irq_handler;
@@ -1251,8 +1263,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
        girq->parents = devm_kcalloc(dev, BCM2835_NUM_IRQS,
                                     sizeof(*girq->parents),
                                     GFP_KERNEL);
-       if (!girq->parents)
+       if (!girq->parents) {
+               pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
                return -ENOMEM;
+       }
 
        if (is_7211) {
                pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS,
@@ -1307,21 +1321,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
        err = gpiochip_add_data(&pc->gpio_chip, pc);
        if (err) {
                dev_err(dev, "could not add GPIO chip\n");
+               pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
                return err;
        }
 
-       pc->pctl_desc = *pdata->pctl_desc;
-       pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
-       if (IS_ERR(pc->pctl_dev)) {
-               gpiochip_remove(&pc->gpio_chip);
-               return PTR_ERR(pc->pctl_dev);
-       }
-
-       pc->gpio_range = *pdata->gpio_range;
-       pc->gpio_range.base = pc->gpio_chip.base;
-       pc->gpio_range.gc = &pc->gpio_chip;
-       pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
-
        return 0;
 }
 
index 91553b2..5377982 100644 (file)
@@ -285,8 +285,12 @@ static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
        desc = (const struct mtk_pin_desc *)hw->soc->pins;
        *gpio_chip = &hw->chip;
 
-       /* Be greedy to guess first gpio_n is equal to eint_n */
-       if (desc[eint_n].eint.eint_n == eint_n)
+       /*
+        * Be greedy to guess first gpio_n is equal to eint_n.
+        * Only eint virtual eint number is greater than gpio number.
+        */
+       if (hw->soc->npins > eint_n &&
+           desc[eint_n].eint.eint_n == eint_n)
                *gpio_n = eint_n;
        else
                *gpio_n = mtk_xt_find_eint_num(hw, eint_n);
index 24764eb..9ed7647 100644 (file)
@@ -1251,10 +1251,10 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
                bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
                bank->gpio_chip.base = args.args[1];
 
-               npins = args.args[2];
-               while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
-                                                        ++i, &args))
-                       npins += args.args[2];
+               /* get the last defined gpio line (offset + nb of pins) */
+               npins = args.args[0] + args.args[2];
+               while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, ++i, &args))
+                       npins = max(npins, (int)(args.args[0] + args.args[2]));
        } else {
                bank_nr = pctl->nbanks;
                bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
index 04bc3b5..65b4a81 100644 (file)
@@ -1374,8 +1374,8 @@ static int mlxbf_pmc_map_counters(struct device *dev)
                pmc->block[i].counters = info[2];
                pmc->block[i].type = info[3];
 
-               if (IS_ERR(pmc->block[i].mmio_base))
-                       return PTR_ERR(pmc->block[i].mmio_base);
+               if (!pmc->block[i].mmio_base)
+                       return -ENOMEM;
 
                ret = mlxbf_pmc_create_groups(dev, i);
                if (ret)
index 2194780..253a096 100644 (file)
@@ -68,7 +68,7 @@ obj-$(CONFIG_THINKPAD_ACPI)   += thinkpad_acpi.o
 obj-$(CONFIG_THINKPAD_LMI)     += think-lmi.o
 
 # Intel
-obj-$(CONFIG_X86_PLATFORM_DRIVERS_INTEL)               += intel/
+obj-y                          += intel/
 
 # MSI
 obj-$(CONFIG_MSI_LAPTOP)       += msi-laptop.o
index b7e50ed..230593a 100644 (file)
@@ -76,7 +76,7 @@
 #define AMD_CPU_ID_CZN                 AMD_CPU_ID_RN
 #define AMD_CPU_ID_YC                  0x14B5
 
-#define PMC_MSG_DELAY_MIN_US           100
+#define PMC_MSG_DELAY_MIN_US           50
 #define RESPONSE_REGISTER_LOOP_MAX     20000
 
 #define SOC_SUBSYSTEM_IP_MAX   12
@@ -508,7 +508,8 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops amd_pmc_pm_ops = {
-       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(amd_pmc_suspend, amd_pmc_resume)
+       .suspend_noirq = amd_pmc_suspend,
+       .resume_noirq = amd_pmc_resume,
 };
 
 static const struct pci_device_id pmc_pci_ids[] = {
index 9aae45a..57553f9 100644 (file)
@@ -625,7 +625,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
        }
 
        gmux_data->iostart = res->start;
-       gmux_data->iolen = res->end - res->start;
+       gmux_data->iolen = resource_size(res);
 
        if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
                pr_err("gmux I/O region too small (%lu < %u)\n",
index 38ce3e3..40096b2 100644 (file)
@@ -3,19 +3,6 @@
 # Intel x86 Platform Specific Drivers
 #
 
-menuconfig X86_PLATFORM_DRIVERS_INTEL
-       bool "Intel x86 Platform Specific Device Drivers"
-       default y
-       help
-         Say Y here to get to see options for device drivers for
-         various Intel x86 platforms, including vendor-specific
-         drivers. This option alone does not add any kernel code.
-
-         If you say N, all options in this submenu will be skipped
-         and disabled.
-
-if X86_PLATFORM_DRIVERS_INTEL
-
 source "drivers/platform/x86/intel/atomisp2/Kconfig"
 source "drivers/platform/x86/intel/int1092/Kconfig"
 source "drivers/platform/x86/intel/int33fe/Kconfig"
@@ -183,5 +170,3 @@ config INTEL_UNCORE_FREQ_CONTROL
 
          To compile this driver as a module, choose M here: the module
          will be called intel-uncore-frequency.
-
-endif # X86_PLATFORM_DRIVERS_INTEL
index 0859894..13f8cf7 100644 (file)
@@ -99,6 +99,13 @@ static const struct dmi_system_id button_array_table[] = {
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
                },
        },
+       {
+               .ident = "Microsoft Surface Go 3",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+               },
+       },
        { }
 };
 
index 7379768..15ca8af 100644 (file)
@@ -65,7 +65,7 @@ static int __init pmc_core_platform_init(void)
 
        retval = platform_device_register(pmc_core_device);
        if (retval)
-               kfree(pmc_core_device);
+               platform_device_put(pmc_core_device);
 
        return retval;
 }
index ae92930..a91847a 100644 (file)
@@ -657,6 +657,18 @@ static int acpi_add(struct acpi_device *device)
        if (product && strlen(product) > 4)
                switch (product[4]) {
                case '5':
+                       if (strlen(product) > 5)
+                               switch (product[5]) {
+                               case 'N':
+                                       year = 2021;
+                                       break;
+                               case '0':
+                                       year = 2016;
+                                       break;
+                               default:
+                                       year = 2022;
+                               }
+                       break;
                case '6':
                        year = 2016;
                        break;
index 8b292ee..7299ad0 100644 (file)
@@ -35,6 +35,7 @@ struct system76_data {
        union acpi_object *nfan;
        union acpi_object *ntmp;
        struct input_dev *input;
+       bool has_open_ec;
 };
 
 static const struct acpi_device_id device_ids[] = {
@@ -279,20 +280,12 @@ static struct acpi_battery_hook system76_battery_hook = {
 
 static void system76_battery_init(void)
 {
-       acpi_handle handle;
-
-       handle = ec_get_handle();
-       if (handle && acpi_has_method(handle, "GBCT"))
-               battery_hook_register(&system76_battery_hook);
+       battery_hook_register(&system76_battery_hook);
 }
 
 static void system76_battery_exit(void)
 {
-       acpi_handle handle;
-
-       handle = ec_get_handle();
-       if (handle && acpi_has_method(handle, "GBCT"))
-               battery_hook_unregister(&system76_battery_hook);
+       battery_hook_unregister(&system76_battery_hook);
 }
 
 // Get the airplane mode LED brightness
@@ -673,6 +666,10 @@ static int system76_add(struct acpi_device *acpi_dev)
        acpi_dev->driver_data = data;
        data->acpi_dev = acpi_dev;
 
+       // Some models do not run open EC firmware. Check for an ACPI method
+       // that only exists on open EC to guard functionality specific to it.
+       data->has_open_ec = acpi_has_method(acpi_device_handle(data->acpi_dev), "NFAN");
+
        err = system76_get(data, "INIT");
        if (err)
                return err;
@@ -718,27 +715,31 @@ static int system76_add(struct acpi_device *acpi_dev)
        if (err)
                goto error;
 
-       err = system76_get_object(data, "NFAN", &data->nfan);
-       if (err)
-               goto error;
+       if (data->has_open_ec) {
+               err = system76_get_object(data, "NFAN", &data->nfan);
+               if (err)
+                       goto error;
 
-       err = system76_get_object(data, "NTMP", &data->ntmp);
-       if (err)
-               goto error;
+               err = system76_get_object(data, "NTMP", &data->ntmp);
+               if (err)
+                       goto error;
 
-       data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
-               "system76_acpi", data, &thermal_chip_info, NULL);
-       err = PTR_ERR_OR_ZERO(data->therm);
-       if (err)
-               goto error;
+               data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
+                       "system76_acpi", data, &thermal_chip_info, NULL);
+               err = PTR_ERR_OR_ZERO(data->therm);
+               if (err)
+                       goto error;
 
-       system76_battery_init();
+               system76_battery_init();
+       }
 
        return 0;
 
 error:
-       kfree(data->ntmp);
-       kfree(data->nfan);
+       if (data->has_open_ec) {
+               kfree(data->ntmp);
+               kfree(data->nfan);
+       }
        return err;
 }
 
@@ -749,14 +750,15 @@ static int system76_remove(struct acpi_device *acpi_dev)
 
        data = acpi_driver_data(acpi_dev);
 
-       system76_battery_exit();
+       if (data->has_open_ec) {
+               system76_battery_exit();
+               kfree(data->nfan);
+               kfree(data->ntmp);
+       }
 
        devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led);
        devm_led_classdev_unregister(&acpi_dev->dev, &data->kb_led);
 
-       kfree(data->nfan);
-       kfree(data->ntmp);
-
        system76_get(data, "FINI");
 
        return 0;
index b3ac9c3..bb1abb9 100644 (file)
@@ -3015,6 +3015,8 @@ static struct attribute *hotkey_attributes[] = {
        &dev_attr_hotkey_all_mask.attr,
        &dev_attr_hotkey_adaptive_all_mask.attr,
        &dev_attr_hotkey_recommended_mask.attr,
+       &dev_attr_hotkey_tablet_mode.attr,
+       &dev_attr_hotkey_radio_sw.attr,
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
        &dev_attr_hotkey_source_mask.attr,
        &dev_attr_hotkey_poll_freq.attr,
@@ -5726,11 +5728,11 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
        "tpacpi::standby",
        "tpacpi::dock_status1",
        "tpacpi::dock_status2",
-       "tpacpi::unknown_led2",
+       "tpacpi::lid_logo_dot",
        "tpacpi::unknown_led3",
        "tpacpi::thinkvantage",
 };
-#define TPACPI_SAFE_LEDS       0x1081U
+#define TPACPI_SAFE_LEDS       0x1481U
 
 static inline bool tpacpi_is_led_restricted(const unsigned int led)
 {
index fa88120..17dd54d 100644 (file)
@@ -905,6 +905,16 @@ static const struct ts_dmi_data trekstor_primetab_t13b_data = {
        .properties = trekstor_primetab_t13b_props,
 };
 
+static const struct property_entry trekstor_surftab_duo_w1_props[] = {
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+       { }
+};
+
+static const struct ts_dmi_data trekstor_surftab_duo_w1_data = {
+       .acpi_name      = "GDIX1001:00",
+       .properties     = trekstor_surftab_duo_w1_props,
+};
+
 static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 0),
@@ -1502,6 +1512,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Primetab T13B"),
                },
        },
+       {
+               /* TrekStor SurfTab duo W1 10.1 ST10432-10b */
+               .driver_data = (void *)&trekstor_surftab_duo_w1_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
+               },
+       },
        {
                /* TrekStor SurfTab twin 10.1 ST10432-8 */
                .driver_data = (void *)&trekstor_surftab_twin_10_1_data,
index 4c5bba5..24d3395 100644 (file)
@@ -20,7 +20,6 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
        struct mrq_reset_request request;
        struct tegra_bpmp_message msg;
-       int err;
 
        memset(&request, 0, sizeof(request));
        request.cmd = command;
@@ -31,13 +30,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        msg.tx.data = &request;
        msg.tx.size = sizeof(request);
 
-       err = tegra_bpmp_transfer(bpmp, &msg);
-       if (err)
-               return err;
-       if (msg.rx.ret)
-               return -EINVAL;
-
-       return 0;
+       return tegra_bpmp_transfer(bpmp, &msg);
 }
 
 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
index 284b939..059dae8 100644 (file)
@@ -3100,6 +3100,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
+       char *tmp_persistent_address = conn->persistent_address;
+       char *tmp_local_ipaddr = conn->local_ipaddr;
 
        del_timer_sync(&conn->transport_timer);
 
@@ -3121,8 +3123,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        spin_lock_bh(&session->frwd_lock);
        free_pages((unsigned long) conn->data,
                   get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
-       kfree(conn->persistent_address);
-       kfree(conn->local_ipaddr);
        /* regular RX path uses back_lock */
        spin_lock_bh(&session->back_lock);
        kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
@@ -3134,6 +3134,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        mutex_unlock(&session->eh_mutex);
 
        iscsi_destroy_conn(cls_conn);
+       kfree(tmp_persistent_address);
+       kfree(tmp_local_ipaddr);
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
 
index bd6d459..08b2e85 100644 (file)
@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
        char mybuf[64];
        char *pbuf;
 
-       if (nbytes > 64)
-               nbytes = 64;
+       if (nbytes > 63)
+               nbytes = 63;
 
        memset(mybuf, 0, sizeof(mybuf));
 
index ba17a8f..9457552 100644 (file)
@@ -12709,7 +12709,7 @@ lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
        cpumask_clear(&eqhdl->aff_mask);
        cpumask_set_cpu(cpu, &eqhdl->aff_mask);
        irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
-       irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+       irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
 }
 
 /**
@@ -12998,7 +12998,6 @@ cfg_fail_out:
        for (--index; index >= 0; index--) {
                eqhdl = lpfc_get_eq_hdl(index);
                lpfc_irq_clear_aff(eqhdl);
-               irq_set_affinity_hint(eqhdl->irq, NULL);
                free_irq(eqhdl->irq, eqhdl);
        }
 
@@ -13159,7 +13158,6 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
                for (index = 0; index < phba->cfg_irq_chann; index++) {
                        eqhdl = lpfc_get_eq_hdl(index);
                        lpfc_irq_clear_aff(eqhdl);
-                       irq_set_affinity_hint(eqhdl->irq, NULL);
                        free_irq(eqhdl->irq, eqhdl);
                }
        } else {
index aeb95f4..82e1e24 100644 (file)
@@ -5720,7 +5720,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
                                "Failed to register IRQ for vector %d.\n", i);
                        for (j = 0; j < i; j++) {
                                if (j < instance->low_latency_index_start)
-                                       irq_set_affinity_hint(
+                                       irq_update_affinity_hint(
                                                pci_irq_vector(pdev, j), NULL);
                                free_irq(pci_irq_vector(pdev, j),
                                         &instance->irq_context[j]);
@@ -5763,7 +5763,7 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
        if (instance->msix_vectors)
                for (i = 0; i < instance->msix_vectors; i++) {
                        if (i < instance->low_latency_index_start)
-                               irq_set_affinity_hint(
+                               irq_update_affinity_hint(
                                    pci_irq_vector(instance->pdev, i), NULL);
                        free_irq(pci_irq_vector(instance->pdev, i),
                                 &instance->irq_context[i]);
@@ -5894,22 +5894,25 @@ int megasas_get_device_list(struct megasas_instance *instance)
 }
 
 /**
- * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
- * @instance:                                  Adapter soft state
- * return:                                     void
+ * megasas_set_high_iops_queue_affinity_and_hint -     Set affinity and hint
+ *                                                     for high IOPS queues
+ * @instance:                                          Adapter soft state
+ * return:                                             void
  */
 static inline void
-megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
+megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
 {
        int i;
-       int local_numa_node;
+       unsigned int irq;
+       const struct cpumask *mask;
 
        if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
-               local_numa_node = dev_to_node(&instance->pdev->dev);
+               mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
 
-               for (i = 0; i < instance->low_latency_index_start; i++)
-                       irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
-                               cpumask_of_node(local_numa_node));
+               for (i = 0; i < instance->low_latency_index_start; i++) {
+                       irq = pci_irq_vector(instance->pdev, i);
+                       irq_set_affinity_and_hint(irq, mask);
+               }
        }
 }
 
@@ -5998,7 +6001,7 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
                instance->msix_vectors = 0;
 
        if (instance->smp_affinity_enable)
-               megasas_set_high_iops_queue_affinity_hint(instance);
+               megasas_set_high_iops_queue_affinity_and_hint(instance);
 }
 
 /**
index 81dab9b..511726f 100644 (file)
@@ -3086,6 +3086,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 void
 mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
 {
+       unsigned int irq;
        struct adapter_reply_queue *reply_q, *next;
 
        if (list_empty(&ioc->reply_queue_list))
@@ -3098,9 +3099,10 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
                        continue;
                }
 
-               if (ioc->smp_affinity_enable)
-                       irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
-                           reply_q->msix_index), NULL);
+               if (ioc->smp_affinity_enable) {
+                       irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
+                       irq_update_affinity_hint(irq, NULL);
+               }
                free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
                         reply_q);
                kfree(reply_q);
@@ -3167,18 +3169,15 @@ out:
  * @ioc: per adapter object
  *
  * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
- *
- * It would nice if we could call irq_set_affinity, however it is not
- * an exported symbol
  */
 static void
 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
 {
-       unsigned int cpu, nr_cpus, nr_msix, index = 0;
+       unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
        struct adapter_reply_queue *reply_q;
-       int local_numa_node;
        int iopoll_q_count = ioc->reply_queue_count -
            ioc->iopoll_q_start_index;
+       const struct cpumask *mask;
 
        if (!_base_is_controller_msix_enabled(ioc))
                return;
@@ -3201,11 +3200,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
                 * corresponding to high iops queues.
                 */
                if (ioc->high_iops_queues) {
-                       local_numa_node = dev_to_node(&ioc->pdev->dev);
+                       mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
                        for (index = 0; index < ioc->high_iops_queues;
                            index++) {
-                               irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
-                                   index), cpumask_of_node(local_numa_node));
+                               irq = pci_irq_vector(ioc->pdev, index);
+                               irq_set_affinity_and_hint(irq, mask);
                        }
                }
 
index bed8cc1..fbfeb0b 100644 (file)
@@ -282,12 +282,12 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
        if (rc) {
                pm8001_dbg(pm8001_ha, FAIL,
                           "pm8001_setup_irq failed [ret: %d]\n", rc);
-               goto err_out_shost;
+               goto err_out;
        }
        /* Request Interrupt */
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
-               goto err_out_shost;
+               goto err_out;
 
        count = pm8001_ha->max_q_num;
        /* Queues are chosen based on the number of cores/msix availability */
@@ -423,8 +423,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
        pm8001_tag_init(pm8001_ha);
        return 0;
 
-err_out_shost:
-       scsi_remove_host(pm8001_ha->shost);
 err_out_nodev:
        for (i = 0; i < pm8001_ha->max_memcnt; i++) {
                if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
index b9f6d83..2101fc5 100644 (file)
@@ -3053,7 +3053,6 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        struct smp_completion_resp *psmpPayload;
        struct task_status_struct *ts;
        struct pm8001_device *pm8001_dev;
-       char *pdma_respaddr = NULL;
 
        psmpPayload = (struct smp_completion_resp *)(piomb + 4);
        status = le32_to_cpu(psmpPayload->status);
@@ -3080,19 +3079,23 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                if (pm8001_dev)
                        atomic_dec(&pm8001_dev->running_req);
                if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+                       struct scatterlist *sg_resp = &t->smp_task.smp_resp;
+                       u8 *payload;
+                       void *to;
+
                        pm8001_dbg(pm8001_ha, IO,
                                   "DIRECT RESPONSE Length:%d\n",
                                   param);
-                       pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
-                                               ((u64)sg_dma_address
-                                               (&t->smp_task.smp_resp))));
+                       to = kmap_atomic(sg_page(sg_resp));
+                       payload = to + sg_resp->offset;
                        for (i = 0; i < param; i++) {
-                               *(pdma_respaddr+i) = psmpPayload->_r_a[i];
+                               *(payload + i) = psmpPayload->_r_a[i];
                                pm8001_dbg(pm8001_ha, IO,
                                           "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
-                                          i, *(pdma_respaddr + i),
+                                          i, *(payload + i),
                                           psmpPayload->_r_a[i]);
                        }
+                       kunmap_atomic(to);
                }
                break;
        case IO_ABORTED:
@@ -4236,14 +4239,14 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
        struct sas_task *task = ccb->task;
        struct domain_device *dev = task->dev;
        struct pm8001_device *pm8001_dev = dev->lldd_dev;
-       struct scatterlist *sg_req, *sg_resp;
+       struct scatterlist *sg_req, *sg_resp, *smp_req;
        u32 req_len, resp_len;
        struct smp_req smp_cmd;
        u32 opc;
        struct inbound_queue_table *circularQ;
-       char *preq_dma_addr = NULL;
-       __le64 tmp_addr;
        u32 i, length;
+       u8 *payload;
+       u8 *to;
 
        memset(&smp_cmd, 0, sizeof(smp_cmd));
        /*
@@ -4280,8 +4283,9 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
                pm8001_ha->smp_exp_mode = SMP_INDIRECT;
 
 
-       tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
-       preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+       smp_req = &task->smp_task.smp_req;
+       to = kmap_atomic(sg_page(smp_req));
+       payload = to + smp_req->offset;
 
        /* INDIRECT MODE command settings. Use DMA */
        if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
@@ -4289,7 +4293,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
                /* for SPCv indirect mode. Place the top 4 bytes of
                 * SMP Request header here. */
                for (i = 0; i < 4; i++)
-                       smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+                       smp_cmd.smp_req16[i] = *(payload + i);
                /* exclude top 4 bytes for SMP req header */
                smp_cmd.long_smp_req.long_req_addr =
                        cpu_to_le64((u64)sg_dma_address
@@ -4320,20 +4324,20 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
                pm8001_dbg(pm8001_ha, IO, "SMP REQUEST DIRECT MODE\n");
                for (i = 0; i < length; i++)
                        if (i < 16) {
-                               smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
+                               smp_cmd.smp_req16[i] = *(payload + i);
                                pm8001_dbg(pm8001_ha, IO,
                                           "Byte[%d]:%x (DMA data:%x)\n",
                                           i, smp_cmd.smp_req16[i],
-                                          *(preq_dma_addr));
+                                          *(payload));
                        } else {
-                               smp_cmd.smp_req[i] = *(preq_dma_addr+i);
+                               smp_cmd.smp_req[i] = *(payload + i);
                                pm8001_dbg(pm8001_ha, IO,
                                           "Byte[%d]:%x (DMA data:%x)\n",
                                           i, smp_cmd.smp_req[i],
-                                          *(preq_dma_addr));
+                                          *(payload));
                        }
        }
-
+       kunmap_atomic(to);
        build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
                                &smp_cmd, pm8001_ha->smp_exp_mode, length);
        rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
index 84a4204..5916ed7 100644 (file)
@@ -732,7 +732,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
 {
        struct qedi_work_map *work, *work_tmp;
        u32 proto_itt = cqe->itid;
-       itt_t protoitt = 0;
        int found = 0;
        struct qedi_cmd *qedi_cmd = NULL;
        u32 iscsi_cid;
@@ -812,16 +811,12 @@ unlock:
        return;
 
 check_cleanup_reqs:
-       if (qedi_conn->cmd_cleanup_req > 0) {
-               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+       if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) ==
+           qedi_conn->cmd_cleanup_req) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                          "Freeing tid=0x%x for cid=0x%x\n",
                          cqe->itid, qedi_conn->iscsi_conn_id);
-               qedi_conn->cmd_cleanup_cmpl++;
                wake_up(&qedi_conn->wait_queue);
-       } else {
-               QEDI_ERR(&qedi->dbg_ctx,
-                        "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x\n",
-                        protoitt, cqe->itid, qedi_conn->iscsi_conn_id);
        }
 }
 
@@ -1163,7 +1158,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
        }
 
        qedi_conn->cmd_cleanup_req = 0;
-       qedi_conn->cmd_cleanup_cmpl = 0;
+       atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                  "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
@@ -1215,16 +1210,15 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
                  qedi_conn->iscsi_conn_id);
 
        rval  = wait_event_interruptible_timeout(qedi_conn->wait_queue,
-                                                ((qedi_conn->cmd_cleanup_req ==
-                                                qedi_conn->cmd_cleanup_cmpl) ||
-                                                test_bit(QEDI_IN_RECOVERY,
-                                                         &qedi->flags)),
-                                                5 * HZ);
+                               (qedi_conn->cmd_cleanup_req ==
+                                atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+                               test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+                               5 * HZ);
        if (rval) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                          "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
                          qedi_conn->cmd_cleanup_req,
-                         qedi_conn->cmd_cleanup_cmpl,
+                         atomic_read(&qedi_conn->cmd_cleanup_cmpl),
                          qedi_conn->iscsi_conn_id);
 
                return 0;
@@ -1233,7 +1227,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                  "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
                  qedi_conn->cmd_cleanup_req,
-                 qedi_conn->cmd_cleanup_cmpl,
+                 atomic_read(&qedi_conn->cmd_cleanup_cmpl),
                  qedi_conn->iscsi_conn_id);
 
        iscsi_host_for_each_session(qedi->shost,
@@ -1242,11 +1236,10 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
 
        /* Enable IOs for all other sessions except current.*/
        if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
-                                             (qedi_conn->cmd_cleanup_req ==
-                                              qedi_conn->cmd_cleanup_cmpl) ||
-                                              test_bit(QEDI_IN_RECOVERY,
-                                                       &qedi->flags),
-                                             5 * HZ)) {
+                               (qedi_conn->cmd_cleanup_req ==
+                                atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+                               test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+                               5 * HZ)) {
                iscsi_host_for_each_session(qedi->shost,
                                            qedi_mark_device_available);
                return -1;
@@ -1266,7 +1259,7 @@ void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
 
        qedi_ep = qedi_conn->ep;
        qedi_conn->cmd_cleanup_req = 0;
-       qedi_conn->cmd_cleanup_cmpl = 0;
+       atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
 
        if (!qedi_ep) {
                QEDI_WARN(&qedi->dbg_ctx,
index 88aa7d8..282ecb4 100644 (file)
@@ -412,7 +412,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
        qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
        qedi_conn->fw_cid = qedi_ep->fw_cid;
        qedi_conn->cmd_cleanup_req = 0;
-       qedi_conn->cmd_cleanup_cmpl = 0;
+       atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
 
        if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
                rc = -EINVAL;
index a282860..9b9f2e4 100644 (file)
@@ -155,7 +155,7 @@ struct qedi_conn {
        spinlock_t list_lock;           /* internal conn lock */
        u32 active_cmd_count;
        u32 cmd_cleanup_req;
-       u32 cmd_cleanup_cmpl;
+       atomic_t cmd_cleanup_cmpl;
 
        u32 iscsi_conn_id;
        int itt;
index 25549a8..7cf1f78 100644 (file)
@@ -2491,6 +2491,9 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
        struct va_format vaf;
        char pbuf[64];
 
+       if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
+               return;
+
        va_start(va, fmt);
 
        vaf.fmt = fmt;
index 3c0da37..2104973 100644 (file)
@@ -4342,7 +4342,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
        rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
                            max_zones);
 
-       arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
+       arr = kzalloc(alloc_len, GFP_ATOMIC);
        if (!arr) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
                                INSUFF_RES_ASCQ);
index c2ba652..1f037b8 100644 (file)
@@ -586,9 +586,12 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
                         * Commands like INQUIRY may transfer less data than
                         * requested by the initiator via bufflen. Set residual
                         * count to make upper layer aware of the actual amount
-                        * of data returned.
+                        * of data returned. There are cases when controller
+                        * returns zero dataLen with non zero data - do not set
+                        * residual count in that case.
                         */
-                       scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
+                       if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
+                               scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
                        cmd->result = (DID_OK << 16);
                        break;
 
index 519b365..c2f076b 100644 (file)
@@ -17,6 +17,7 @@
 
 #define BLK_SFT_RSTN   0x0
 #define BLK_CLK_EN     0x4
+#define BLK_MIPI_RESET_DIV     0x8 /* Mini/Nano DISPLAY_BLK_CTRL only */
 
 struct imx8m_blk_ctrl_domain;
 
@@ -36,6 +37,15 @@ struct imx8m_blk_ctrl_domain_data {
        const char *gpc_name;
        u32 rst_mask;
        u32 clk_mask;
+
+       /*
+        * i.MX8M Mini and Nano have a third DISPLAY_BLK_CTRL register
+        * which is used to control the reset for the MIPI Phy.
+        * Since it's only present in certain circumstances,
+        * an if-statement should be used before setting and clearing this
+        * register.
+        */
+       u32 mipi_phy_rst_mask;
 };
 
 #define DOMAIN_MAX_CLKS 3
@@ -78,6 +88,8 @@ static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
 
        /* put devices into reset */
        regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+       if (data->mipi_phy_rst_mask)
+               regmap_clear_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
 
        /* enable upstream and blk-ctrl clocks to allow reset to propagate */
        ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
@@ -99,6 +111,8 @@ static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
 
        /* release reset */
        regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+       if (data->mipi_phy_rst_mask)
+               regmap_set_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
 
        /* disable upstream clocks */
        clk_bulk_disable_unprepare(data->num_clks, domain->clks);
@@ -120,6 +134,9 @@ static int imx8m_blk_ctrl_power_off(struct generic_pm_domain *genpd)
        struct imx8m_blk_ctrl *bc = domain->bc;
 
        /* put devices into reset and disable clocks */
+       if (data->mipi_phy_rst_mask)
+               regmap_clear_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
+
        regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
        regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
 
@@ -480,6 +497,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[]
                .gpc_name = "mipi-dsi",
                .rst_mask = BIT(5),
                .clk_mask = BIT(8) | BIT(9),
+               .mipi_phy_rst_mask = BIT(17),
        },
        [IMX8MM_DISPBLK_PD_MIPI_CSI] = {
                .name = "dispblk-mipi-csi",
@@ -488,6 +506,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[]
                .gpc_name = "mipi-csi",
                .rst_mask = BIT(3) | BIT(4),
                .clk_mask = BIT(10) | BIT(11),
+               .mipi_phy_rst_mask = BIT(16),
        },
 };
 
index ac6d856..77bc120 100644 (file)
@@ -36,6 +36,10 @@ static int __init imx_soc_device_init(void)
        int ret;
        int i;
 
+       /* Return early if this is running on devices with different SoCs */
+       if (!__mxc_cpu_type)
+               return 0;
+
        if (of_machine_is_compatible("fsl,ls1021a"))
                return 0;
 
index f215181..e714ed3 100644 (file)
@@ -320,7 +320,7 @@ static struct platform_driver tegra_fuse_driver = {
 };
 builtin_platform_driver(tegra_fuse_driver);
 
-bool __init tegra_fuse_read_spare(unsigned int spare)
+u32 __init tegra_fuse_read_spare(unsigned int spare)
 {
        unsigned int offset = fuse->soc->info->spare + spare * 4;
 
index de58feb..ecff0c0 100644 (file)
@@ -65,7 +65,7 @@ struct tegra_fuse {
 void tegra_init_revision(void);
 void tegra_init_apbmisc(void);
 
-bool __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_spare(unsigned int spare);
 u32 __init tegra_fuse_read_early(unsigned int offset);
 
 u8 tegra_get_major_rev(void);
index 46feafe..d8cc4b2 100644 (file)
@@ -901,7 +901,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
        return 0;
 
 error_clk:
-       clk_disable_unprepare(spi->clk);
+       clk_unprepare(spi->clk);
 error:
        spi_master_put(master);
 out:
index da6b88e..297dc62 100644 (file)
@@ -203,9 +203,8 @@ static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
 
        *ta_size = roundup(fw->size, PAGE_SIZE);
        *ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
-       if (IS_ERR(*ta)) {
-               pr_err("%s: get_free_pages failed 0x%llx\n", __func__,
-                      (u64)*ta);
+       if (!*ta) {
+               pr_err("%s: get_free_pages failed\n", __func__);
                rc = -ENOMEM;
                goto rel_fw;
        }
index ab2edfc..2a66a52 100644 (file)
@@ -48,10 +48,8 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
                        goto err;
                }
 
-               for (i = 0; i < nr_pages; i++) {
-                       pages[i] = page;
-                       page++;
-               }
+               for (i = 0; i < nr_pages; i++)
+                       pages[i] = page + i;
 
                shm->flags |= TEE_SHM_REGISTER;
                rc = shm_register(shm->ctx, shm, pages, nr_pages,
index 6196d7c..cf2e329 100644 (file)
@@ -23,6 +23,7 @@
 #include "optee_private.h"
 #include "optee_smc.h"
 #include "optee_rpc_cmd.h"
+#include <linux/kmemleak.h>
 #define CREATE_TRACE_POINTS
 #include "optee_trace.h"
 
@@ -783,6 +784,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
                        param->a4 = 0;
                        param->a5 = 0;
                }
+               kmemleak_not_leak(shm);
                break;
        case OPTEE_SMC_RPC_FUNC_FREE:
                shm = reg_pair_to_ptr(param->a1, param->a2);
index 8a8deb9..499fccb 100644 (file)
@@ -1,20 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
  */
+#include <linux/anon_inodes.h>
 #include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/fdtable.h>
 #include <linux/idr.h>
+#include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
 #include <linux/uio.h>
-#include <linux/module.h>
 #include "tee_private.h"
 
-MODULE_IMPORT_NS(DMA_BUF);
-
 static void release_registered_pages(struct tee_shm *shm)
 {
        if (shm->pages) {
@@ -31,16 +28,8 @@ static void release_registered_pages(struct tee_shm *shm)
        }
 }
 
-static void tee_shm_release(struct tee_shm *shm)
+static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
 {
-       struct tee_device *teedev = shm->ctx->teedev;
-
-       if (shm->flags & TEE_SHM_DMA_BUF) {
-               mutex_lock(&teedev->mutex);
-               idr_remove(&teedev->idr, shm->id);
-               mutex_unlock(&teedev->mutex);
-       }
-
        if (shm->flags & TEE_SHM_POOL) {
                struct tee_shm_pool_mgr *poolm;
 
@@ -67,45 +56,6 @@ static void tee_shm_release(struct tee_shm *shm)
        tee_device_put(teedev);
 }
 
-static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
-                       *attach, enum dma_data_direction dir)
-{
-       return NULL;
-}
-
-static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
-                                    struct sg_table *table,
-                                    enum dma_data_direction dir)
-{
-}
-
-static void tee_shm_op_release(struct dma_buf *dmabuf)
-{
-       struct tee_shm *shm = dmabuf->priv;
-
-       tee_shm_release(shm);
-}
-
-static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
-       struct tee_shm *shm = dmabuf->priv;
-       size_t size = vma->vm_end - vma->vm_start;
-
-       /* Refuse sharing shared memory provided by application */
-       if (shm->flags & TEE_SHM_USER_MAPPED)
-               return -EINVAL;
-
-       return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
-                              size, vma->vm_page_prot);
-}
-
-static const struct dma_buf_ops tee_shm_dma_buf_ops = {
-       .map_dma_buf = tee_shm_op_map_dma_buf,
-       .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
-       .release = tee_shm_op_release,
-       .mmap = tee_shm_op_mmap,
-};
-
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 {
        struct tee_device *teedev = ctx->teedev;
@@ -140,6 +90,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                goto err_dev_put;
        }
 
+       refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_POOL;
        shm->ctx = ctx;
        if (flags & TEE_SHM_DMA_BUF)
@@ -153,10 +104,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                goto err_kfree;
        }
 
-
        if (flags & TEE_SHM_DMA_BUF) {
-               DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
                mutex_lock(&teedev->mutex);
                shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
                mutex_unlock(&teedev->mutex);
@@ -164,28 +112,11 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                        ret = ERR_PTR(shm->id);
                        goto err_pool_free;
                }
-
-               exp_info.ops = &tee_shm_dma_buf_ops;
-               exp_info.size = shm->size;
-               exp_info.flags = O_RDWR;
-               exp_info.priv = shm;
-
-               shm->dmabuf = dma_buf_export(&exp_info);
-               if (IS_ERR(shm->dmabuf)) {
-                       ret = ERR_CAST(shm->dmabuf);
-                       goto err_rem;
-               }
        }
 
        teedev_ctx_get(ctx);
 
        return shm;
-err_rem:
-       if (flags & TEE_SHM_DMA_BUF) {
-               mutex_lock(&teedev->mutex);
-               idr_remove(&teedev->idr, shm->id);
-               mutex_unlock(&teedev->mutex);
-       }
 err_pool_free:
        poolm->ops->free(poolm, shm);
 err_kfree:
@@ -246,6 +177,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                goto err;
        }
 
+       refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_REGISTER;
        shm->ctx = ctx;
        shm->id = -1;
@@ -306,22 +238,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                goto err;
        }
 
-       if (flags & TEE_SHM_DMA_BUF) {
-               DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-               exp_info.ops = &tee_shm_dma_buf_ops;
-               exp_info.size = shm->size;
-               exp_info.flags = O_RDWR;
-               exp_info.priv = shm;
-
-               shm->dmabuf = dma_buf_export(&exp_info);
-               if (IS_ERR(shm->dmabuf)) {
-                       ret = ERR_CAST(shm->dmabuf);
-                       teedev->desc->ops->shm_unregister(ctx, shm);
-                       goto err;
-               }
-       }
-
        return shm;
 err:
        if (shm) {
@@ -339,6 +255,35 @@ err:
 }
 EXPORT_SYMBOL_GPL(tee_shm_register);
 
+static int tee_shm_fop_release(struct inode *inode, struct file *filp)
+{
+       tee_shm_put(filp->private_data);
+       return 0;
+}
+
+static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct tee_shm *shm = filp->private_data;
+       size_t size = vma->vm_end - vma->vm_start;
+
+       /* Refuse sharing shared memory provided by application */
+       if (shm->flags & TEE_SHM_USER_MAPPED)
+               return -EINVAL;
+
+       /* check for overflowing the buffer's size */
+       if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
+               return -EINVAL;
+
+       return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+                              size, vma->vm_page_prot);
+}
+
+static const struct file_operations tee_shm_fops = {
+       .owner = THIS_MODULE,
+       .release = tee_shm_fop_release,
+       .mmap = tee_shm_fop_mmap,
+};
+
 /**
  * tee_shm_get_fd() - Increase reference count and return file descriptor
  * @shm:       Shared memory handle
@@ -351,10 +296,11 @@ int tee_shm_get_fd(struct tee_shm *shm)
        if (!(shm->flags & TEE_SHM_DMA_BUF))
                return -EINVAL;
 
-       get_dma_buf(shm->dmabuf);
-       fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+       /* matched by tee_shm_put() in tee_shm_op_release() */
+       refcount_inc(&shm->refcount);
+       fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
        if (fd < 0)
-               dma_buf_put(shm->dmabuf);
+               tee_shm_put(shm);
        return fd;
 }
 
@@ -364,17 +310,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
  */
 void tee_shm_free(struct tee_shm *shm)
 {
-       /*
-        * dma_buf_put() decreases the dmabuf reference counter and will
-        * call tee_shm_release() when the last reference is gone.
-        *
-        * In the case of driver private memory we call tee_shm_release
-        * directly instead as it doesn't have a reference counter.
-        */
-       if (shm->flags & TEE_SHM_DMA_BUF)
-               dma_buf_put(shm->dmabuf);
-       else
-               tee_shm_release(shm);
+       tee_shm_put(shm);
 }
 EXPORT_SYMBOL_GPL(tee_shm_free);
 
@@ -481,10 +417,15 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
+       /*
+        * If the tee_shm was found in the IDR it must have a refcount
+        * larger than 0 due to the guarantee in tee_shm_put() below. So
+        * it's safe to use refcount_inc().
+        */
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
-       else if (shm->flags & TEE_SHM_DMA_BUF)
-               get_dma_buf(shm->dmabuf);
+       else
+               refcount_inc(&shm->refcount);
        mutex_unlock(&teedev->mutex);
        return shm;
 }
@@ -496,7 +437,24 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
  */
 void tee_shm_put(struct tee_shm *shm)
 {
-       if (shm->flags & TEE_SHM_DMA_BUF)
-               dma_buf_put(shm->dmabuf);
+       struct tee_device *teedev = shm->ctx->teedev;
+       bool do_release = false;
+
+       mutex_lock(&teedev->mutex);
+       if (refcount_dec_and_test(&shm->refcount)) {
+               /*
+                * refcount has reached 0, we must now remove it from the
+                * IDR before releasing the mutex. This will guarantee that
+                * the refcount_inc() in tee_shm_get_from_id() never starts
+                * from 0.
+                */
+               if (shm->flags & TEE_SHM_DMA_BUF)
+                       idr_remove(&teedev->idr, shm->id);
+               do_release = true;
+       }
+       mutex_unlock(&teedev->mutex);
+
+       if (do_release)
+               tee_shm_release(teedev, shm);
 }
 EXPORT_SYMBOL_GPL(tee_shm_put);
index b25b54d..e693ec8 100644 (file)
@@ -29,7 +29,7 @@ static const char * const fivr_strings[] = {
 };
 
 static const struct mmio_reg tgl_fivr_mmio_regs[] = {
-       { 0, 0x5A18, 3, 0x7, 12}, /* vco_ref_code_lo */
+       { 0, 0x5A18, 3, 0x7, 11}, /* vco_ref_code_lo */
        { 0, 0x5A18, 8, 0xFF, 16}, /* vco_ref_code_hi */
        { 0, 0x5A08, 8, 0xFF, 0}, /* spread_spectrum_pct */
        { 0, 0x5A08, 1, 0x1, 8}, /* spread_spectrum_clk_enable */
index 71e0dd2..ebaf750 100644 (file)
@@ -37,6 +37,8 @@ struct xencons_info {
        struct xenbus_device *xbdev;
        struct xencons_interface *intf;
        unsigned int evtchn;
+       XENCONS_RING_IDX out_cons;
+       unsigned int out_cons_same;
        struct hvc_struct *hvc;
        int irq;
        int vtermno;
@@ -138,6 +140,8 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
        XENCONS_RING_IDX cons, prod;
        int recv = 0;
        struct xencons_info *xencons = vtermno_to_xencons(vtermno);
+       unsigned int eoiflag = 0;
+
        if (xencons == NULL)
                return -EINVAL;
        intf = xencons->intf;
@@ -157,7 +161,27 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
        mb();                   /* read ring before consuming */
        intf->in_cons = cons;
 
-       notify_daemon(xencons);
+       /*
+        * When to mark interrupt having been spurious:
+        * - there was no new data to be read, and
+        * - the backend did not consume some output bytes, and
+        * - the previous round with no read data didn't see consumed bytes
+        *   (we might have a race with an interrupt being in flight while
+        *   updating xencons->out_cons, so account for that by allowing one
+        *   round without any visible reason)
+        */
+       if (intf->out_cons != xencons->out_cons) {
+               xencons->out_cons = intf->out_cons;
+               xencons->out_cons_same = 0;
+       }
+       if (recv) {
+               notify_daemon(xencons);
+       } else if (xencons->out_cons_same++ > 1) {
+               eoiflag = XEN_EOI_FLAG_SPURIOUS;
+       }
+
+       xen_irq_lateeoi(xencons->irq, eoiflag);
+
        return recv;
 }
 
@@ -386,7 +410,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
        if (ret)
                return ret;
        info->evtchn = evtchn;
-       irq = bind_evtchn_to_irq(evtchn);
+       irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
        if (irq < 0)
                return irq;
        info->irq = irq;
@@ -551,7 +575,7 @@ static int __init xen_hvc_init(void)
                        return r;
 
                info = vtermno_to_xencons(HVC_COOKIE);
-               info->irq = bind_evtchn_to_irq(info->evtchn);
+               info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
        }
        if (info->irq < 0)
                info->irq = 0; /* NO_IRQ */
index 7e0884e..23ba1fc 100644 (file)
@@ -140,6 +140,8 @@ struct n_hdlc {
        struct n_hdlc_buf_list  rx_buf_list;
        struct n_hdlc_buf_list  tx_free_buf_list;
        struct n_hdlc_buf_list  rx_free_buf_list;
+       struct work_struct      write_work;
+       struct tty_struct       *tty_for_write_work;
 };
 
 /*
@@ -154,6 +156,7 @@ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
 /* Local functions */
 
 static struct n_hdlc *n_hdlc_alloc(void);
+static void n_hdlc_tty_write_work(struct work_struct *work);
 
 /* max frame size for memory allocations */
 static int maxframe = 4096;
@@ -210,6 +213,8 @@ static void n_hdlc_tty_close(struct tty_struct *tty)
        wake_up_interruptible(&tty->read_wait);
        wake_up_interruptible(&tty->write_wait);
 
+       cancel_work_sync(&n_hdlc->write_work);
+
        n_hdlc_free_buf_list(&n_hdlc->rx_free_buf_list);
        n_hdlc_free_buf_list(&n_hdlc->tx_free_buf_list);
        n_hdlc_free_buf_list(&n_hdlc->rx_buf_list);
@@ -241,6 +246,8 @@ static int n_hdlc_tty_open(struct tty_struct *tty)
                return -ENFILE;
        }
 
+       INIT_WORK(&n_hdlc->write_work, n_hdlc_tty_write_work);
+       n_hdlc->tty_for_write_work = tty;
        tty->disc_data = n_hdlc;
        tty->receive_room = 65536;
 
@@ -334,6 +341,20 @@ check_again:
                goto check_again;
 }      /* end of n_hdlc_send_frames() */
 
+/**
+ * n_hdlc_tty_write_work - Asynchronous callback for transmit wakeup
+ * @work: pointer to work_struct
+ *
+ * Called when low level device driver can accept more send data.
+ */
+static void n_hdlc_tty_write_work(struct work_struct *work)
+{
+       struct n_hdlc *n_hdlc = container_of(work, struct n_hdlc, write_work);
+       struct tty_struct *tty = n_hdlc->tty_for_write_work;
+
+       n_hdlc_send_frames(n_hdlc, tty);
+}      /* end of n_hdlc_tty_write_work() */
+
 /**
  * n_hdlc_tty_wakeup - Callback for transmit wakeup
  * @tty: pointer to associated tty instance data
@@ -344,7 +365,7 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
 {
        struct n_hdlc *n_hdlc = tty->disc_data;
 
-       n_hdlc_send_frames(n_hdlc, tty);
+       schedule_work(&n_hdlc->write_work);
 }      /* end of n_hdlc_tty_wakeup() */
 
 /**
index 31c9e83..251f001 100644 (file)
@@ -290,25 +290,6 @@ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
        }
 }
 
-static void fintek_8250_goto_highspeed(struct uart_8250_port *uart,
-                             struct fintek_8250 *pdata)
-{
-       sio_write_reg(pdata, LDN, pdata->index);
-
-       switch (pdata->pid) {
-       case CHIP_ID_F81966:
-       case CHIP_ID_F81866: /* set uart clock for high speed serial mode */
-               sio_write_mask_reg(pdata, F81866_UART_CLK,
-                       F81866_UART_CLK_MASK,
-                       F81866_UART_CLK_14_769MHZ);
-
-               uart->port.uartclk = 921600 * 16;
-               break;
-       default: /* leave clock speed untouched */
-               break;
-       }
-}
-
 static void fintek_8250_set_termios(struct uart_port *port,
                                    struct ktermios *termios,
                                    struct ktermios *old)
@@ -430,7 +411,6 @@ static int probe_setup_port(struct fintek_8250 *pdata,
 
                                fintek_8250_set_irq_mode(pdata, level_mode);
                                fintek_8250_set_max_fifo(pdata);
-                               fintek_8250_goto_highspeed(uart, pdata);
 
                                fintek_8250_exit_key(addr[i]);
 
index 27df0c6..e85bf76 100644 (file)
@@ -1541,15 +1541,27 @@ static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
 {
        struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
        struct cdns *cdns = dev_get_drvdata(pdev->dev);
+       unsigned long flags;
 
        trace_cdnsp_pullup(is_on);
 
+       /*
+        * Disable events handling while controller is being
+        * enabled/disabled.
+        */
+       disable_irq(cdns->dev_irq);
+       spin_lock_irqsave(&pdev->lock, flags);
+
        if (!is_on) {
                cdnsp_reset_device(pdev);
                cdns_clear_vbus(cdns);
        } else {
                cdns_set_vbus(cdns);
        }
+
+       spin_unlock_irqrestore(&pdev->lock, flags);
+       enable_irq(cdns->dev_irq);
+
        return 0;
 }
 
index 1b14384..e45c3d6 100644 (file)
@@ -1029,6 +1029,8 @@ static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
                return;
        }
 
+       *status = 0;
+
        cdnsp_finish_td(pdev, td, event, pep, status);
 }
 
@@ -1523,7 +1525,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
        spin_lock_irqsave(&pdev->lock, flags);
 
        if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
-               cdnsp_died(pdev);
+               /*
+                * While removing or stopping driver there may still be deferred
+                * not handled interrupt which should not be treated as error.
+                * Driver should simply ignore it.
+                */
+               if (pdev->gadget_driver)
+                       cdnsp_died(pdev);
+
                spin_unlock_irqrestore(&pdev->lock, flags);
                return IRQ_HANDLED;
        }
index 6a2571c..5983dfb 100644 (file)
@@ -57,9 +57,9 @@ DECLARE_EVENT_CLASS(cdnsp_log_ep,
                __entry->first_prime_det = pep->stream_info.first_prime_det;
                __entry->drbls_count = pep->stream_info.drbls_count;
        ),
-       TP_printk("%s: SID: %08x ep state: %x stream: enabled: %d num  %d "
+       TP_printk("%s: SID: %08x, ep state: %x, stream: enabled: %d num %d "
                  "tds %d, first prime: %d drbls %d",
-                 __get_str(name), __entry->state, __entry->stream_id,
+                 __get_str(name), __entry->stream_id, __entry->state,
                  __entry->enabled, __entry->num_streams, __entry->td_count,
                  __entry->first_prime_det, __entry->drbls_count)
 );
index 84dadfa..9643b90 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include "core.h"
 #include "drd.h"
 #include "host-export.h"
index 16b1fd9..48bc8a4 100644 (file)
@@ -406,7 +406,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
         * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
         * (see the end of section 5.6.3), so don't warn about them.
         */
-       maxp = usb_endpoint_maxp(&endpoint->desc);
+       maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
        if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
                dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
                    cfgno, inum, asnum, d->bEndpointAddress);
@@ -422,9 +422,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
                maxpacket_maxes = full_speed_maxpacket_maxes;
                break;
        case USB_SPEED_HIGH:
-               /* Bits 12..11 are allowed only for HS periodic endpoints */
+               /* Multiple-transactions bits are allowed only for HS periodic endpoints */
                if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
-                       i = maxp & (BIT(12) | BIT(11));
+                       i = maxp & USB_EP_MAXP_MULT_MASK;
                        maxp &= ~i;
                }
                fallthrough;
index 019351c..d3c14b5 100644 (file)
@@ -434,6 +434,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1532, 0x0116), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
+       { USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
        { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
 
index c8f18f3..c331a51 100644 (file)
@@ -575,6 +575,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
                ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
                ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
                dwc2_writel(hsotg, ggpio, GGPIO);
+
+               /* ID/VBUS detection startup time */
+               usleep_range(5000, 7000);
        }
 
        retval = dwc2_drd_init(hsotg);
index 9abbd01..3cb01cd 100644 (file)
@@ -649,7 +649,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
        struct dwc3_qcom        *qcom = platform_get_drvdata(pdev);
        struct device_node      *np = pdev->dev.of_node, *dwc3_np;
        struct device           *dev = &pdev->dev;
-       struct property         *prop;
        int                     ret;
 
        dwc3_np = of_get_compatible_child(np, "snps,dwc3");
@@ -658,20 +657,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
-       if (!prop) {
-               ret = -ENOMEM;
-               dev_err(dev, "unable to allocate memory for property\n");
-               goto node_put;
-       }
-
-       prop->name = "tx-fifo-resize";
-       ret = of_add_property(dwc3_np, prop);
-       if (ret) {
-               dev_err(dev, "unable to add property\n");
-               goto node_put;
-       }
-
        ret = of_platform_populate(np, NULL, NULL, dev);
        if (ret) {
                dev_err(dev, "failed to register dwc3 core - %d\n", ret);
index 933d77a..4502108 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/pci_ids.h>
 #include <linux/memblock.h>
 #include <linux/io.h>
-#include <linux/iopoll.h>
 #include <asm/pci-direct.h>
 #include <asm/fixmap.h>
 #include <linux/bcd.h>
@@ -136,9 +135,17 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
 {
        u32 result;
 
-       return readl_poll_timeout_atomic(ptr, result,
-                                        ((result & mask) == done),
-                                        delay, wait);
+       /* Can not use readl_poll_timeout_atomic() for early boot things */
+       do {
+               result = readl(ptr);
+               result &= mask;
+               if (result == done)
+                       return 0;
+               udelay(delay);
+               wait -= delay;
+       } while (wait > 0);
+
+       return -ETIMEDOUT;
 }
 
 static void __init xdbc_bios_handoff(void)
index 504c1cb..3789c32 100644 (file)
@@ -1679,6 +1679,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
        struct usb_function             *f = NULL;
        u8                              endp;
 
+       if (w_length > USB_COMP_EP0_BUFSIZ) {
+               if (ctrl->bRequestType & USB_DIR_IN) {
+                       /* Cast away the const, we are going to overwrite on purpose. */
+                       __le16 *temp = (__le16 *)&ctrl->wLength;
+
+                       *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
+                       w_length = USB_COMP_EP0_BUFSIZ;
+               } else {
+                       goto done;
+               }
+       }
+
        /* partial re-init of the response message; the function or the
         * gadget might need to intercept e.g. a control-OUT completion
         * when we delegate to it.
@@ -2209,7 +2221,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
        if (!cdev->req)
                return -ENOMEM;
 
-       cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
+       cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
        if (!cdev->req->buf)
                goto fail;
 
index e20c19a..a7e069b 100644 (file)
@@ -1773,11 +1773,15 @@ static void ffs_data_clear(struct ffs_data *ffs)
 
        BUG_ON(ffs->gadget);
 
-       if (ffs->epfiles)
+       if (ffs->epfiles) {
                ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+               ffs->epfiles = NULL;
+       }
 
-       if (ffs->ffs_eventfd)
+       if (ffs->ffs_eventfd) {
                eventfd_ctx_put(ffs->ffs_eventfd);
+               ffs->ffs_eventfd = NULL;
+       }
 
        kfree(ffs->raw_descs_data);
        kfree(ffs->raw_strings);
@@ -1790,7 +1794,6 @@ static void ffs_data_reset(struct ffs_data *ffs)
 
        ffs_data_clear(ffs);
 
-       ffs->epfiles = NULL;
        ffs->raw_descs_data = NULL;
        ffs->raw_descs = NULL;
        ffs->raw_strings = NULL;
index e0ad5ae..6f5d45e 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
 
 #include "u_ether.h"
 
@@ -863,19 +864,23 @@ int gether_register_netdev(struct net_device *net)
 {
        struct eth_dev *dev;
        struct usb_gadget *g;
-       struct sockaddr sa;
        int status;
 
        if (!net->dev.parent)
                return -EINVAL;
        dev = netdev_priv(net);
        g = dev->gadget;
+
+       net->addr_assign_type = NET_ADDR_RANDOM;
+       eth_hw_addr_set(net, dev->dev_mac);
+
        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                return status;
        } else {
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+               INFO(dev, "MAC %pM\n", dev->dev_mac);
 
                /* two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
@@ -883,15 +888,6 @@ int gether_register_netdev(struct net_device *net)
                 */
                netif_carrier_off(net);
        }
-       sa.sa_family = net->type;
-       memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
-       rtnl_lock();
-       status = dev_set_mac_address(net, &sa, NULL);
-       rtnl_unlock();
-       if (status)
-               pr_warn("cannot set self ethernet address: %d\n", status);
-       else
-               INFO(dev, "MAC %pM\n", dev->dev_mac);
 
        return status;
 }
index e1d566c..6bcbad3 100644 (file)
@@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep)
                goto fail_1;
        }
 
-       req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
+       req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL);
        if (!req->buf) {
                err = -ENOMEM;
                stp = 2;
@@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget,
        void *data = NULL;
        u16 len = 0;
 
+       if (length > DBGP_REQ_LEN) {
+               if (ctrl->bRequestType & USB_DIR_IN) {
+                       /* Cast away the const, we are going to overwrite on purpose. */
+                       __le16 *temp = (__le16 *)&ctrl->wLength;
+
+                       *temp = cpu_to_le16(DBGP_REQ_LEN);
+                       length = DBGP_REQ_LEN;
+               } else {
+                       return err;
+               }
+       }
+
+
        if (request == USB_REQ_GET_DESCRIPTOR) {
                switch (value>>8) {
                case USB_DT_DEVICE:
index 78be947..3b58f4f 100644 (file)
@@ -110,6 +110,8 @@ enum ep0_state {
 /* enough for the whole queue: most events invalidate others */
 #define        N_EVENT                 5
 
+#define RBUF_SIZE              256
+
 struct dev_data {
        spinlock_t                      lock;
        refcount_t                      count;
@@ -144,7 +146,7 @@ struct dev_data {
        struct dentry                   *dentry;
 
        /* except this scratch i/o buffer for ep0 */
-       u8                              rbuf [256];
+       u8                              rbuf[RBUF_SIZE];
 };
 
 static inline void get_dev (struct dev_data *data)
@@ -1331,6 +1333,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
        u16                             w_value = le16_to_cpu(ctrl->wValue);
        u16                             w_length = le16_to_cpu(ctrl->wLength);
 
+       if (w_length > RBUF_SIZE) {
+               if (ctrl->bRequestType & USB_DIR_IN) {
+                       /* Cast away the const, we are going to overwrite on purpose. */
+                       __le16 *temp = (__le16 *)&ctrl->wLength;
+
+                       *temp = cpu_to_le16(RBUF_SIZE);
+                       w_length = RBUF_SIZE;
+               } else {
+                       return value;
+               }
+       }
+
        spin_lock (&dev->lock);
        dev->setup_abort = 0;
        if (dev->state == STATE_DEV_UNCONNECTED) {
index af946c4..df3522d 100644 (file)
@@ -717,6 +717,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
                        continue;
 
                retval = xhci_disable_slot(xhci, i);
+               xhci_free_virt_device(xhci, i);
                if (retval)
                        xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
                                 i, retval);
index 1edef75..edbfa82 100644 (file)
@@ -781,7 +781,7 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 
        ret = xhci_check_bandwidth(hcd, udev);
        if (!ret)
-               INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
+               list_del_init(&mtk->bw_ep_chk_list);
 
        return ret;
 }
index 92adf61..5c35197 100644 (file)
@@ -71,6 +71,8 @@
 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4           0x161e
 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5           0x15d6
 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6           0x15d7
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7           0x161c
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8           0x161f
 
 #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI                        0x1042
 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI               0x1142
@@ -121,7 +123,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        /* Look for vendor-specific quirks */
        if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
                        (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
-                        pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 ||
                         pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
                if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
                                pdev->revision == 0x0) {
@@ -156,6 +157,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                        pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
                xhci->quirks |= XHCI_BROKEN_STREAMS;
 
+       if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+                       pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100)
+               xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
        if (pdev->vendor == PCI_VENDOR_ID_NEC)
                xhci->quirks |= XHCI_NEC_HOST;
 
@@ -330,7 +335,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
            pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 ||
            pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 ||
            pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 ||
-           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6))
+           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 ||
+           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 ||
+           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8))
                xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
 
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
index eaa49ae..d0b6806 100644 (file)
@@ -1525,7 +1525,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
                /* Delete default control endpoint resources */
                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
-       xhci_free_virt_device(xhci, slot_id);
 }
 
 static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
index 902f410..f5b1bcc 100644 (file)
@@ -3934,7 +3934,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
        struct xhci_slot_ctx *slot_ctx;
        int i, ret;
 
-#ifndef CONFIG_USB_DEFAULT_PERSIST
        /*
         * We called pm_runtime_get_noresume when the device was attached.
         * Decrement the counter here to allow controller to runtime suspend
@@ -3942,7 +3941,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
         */
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                pm_runtime_put_noidle(hcd->self.controller);
-#endif
 
        ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
        /* If the host is halted due to driver unload, we still need to free the
@@ -3961,9 +3959,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
                del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
        }
        virt_dev->udev = NULL;
-       ret = xhci_disable_slot(xhci, udev->slot_id);
-       if (ret)
-               xhci_free_virt_device(xhci, udev->slot_id);
+       xhci_disable_slot(xhci, udev->slot_id);
+       xhci_free_virt_device(xhci, udev->slot_id);
 }
 
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@@ -3973,7 +3970,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
        u32 state;
        int ret = 0;
 
-       command = xhci_alloc_command(xhci, false, GFP_KERNEL);
+       command = xhci_alloc_command(xhci, true, GFP_KERNEL);
        if (!command)
                return -ENOMEM;
 
@@ -3998,6 +3995,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
+
+       wait_for_completion(command->completion);
+
+       if (command->status != COMP_SUCCESS)
+               xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
+                         slot_id, command->status);
+
+       xhci_free_command(xhci, command);
+
        return ret;
 }
 
@@ -4094,23 +4100,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 
        xhci_debugfs_create_slot(xhci, slot_id);
 
-#ifndef CONFIG_USB_DEFAULT_PERSIST
        /*
         * If resetting upon resume, we can't put the controller into runtime
         * suspend if there is a device attached.
         */
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                pm_runtime_get_noresume(hcd->self.controller);
-#endif
 
        /* Is this a LS or FS device under a HS hub? */
        /* Hub or peripherial? */
        return 1;
 
 disable_slot:
-       ret = xhci_disable_slot(xhci, udev->slot_id);
-       if (ret)
-               xhci_free_virt_device(xhci, udev->slot_id);
+       xhci_disable_slot(xhci, udev->slot_id);
+       xhci_free_virt_device(xhci, udev->slot_id);
 
        return 0;
 }
@@ -4240,6 +4243,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 
                mutex_unlock(&xhci->mutex);
                ret = xhci_disable_slot(xhci, udev->slot_id);
+               xhci_free_virt_device(xhci, udev->slot_id);
                if (!ret)
                        xhci_alloc_dev(hcd, udev);
                kfree(command->completion);
index a9a65b4..9977600 100644 (file)
@@ -77,7 +77,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
                if (usb_endpoint_xfer_int(desc) ||
                                usb_endpoint_xfer_isoc(desc)) {
                        interval = desc->bInterval;
-                       interval = clamp_val(interval, 1, 16) - 1;
+                       interval = clamp_val(interval, 1, 16);
                        if (usb_endpoint_xfer_isoc(desc) && comp_desc)
                                mult = comp_desc->bmAttributes;
                }
@@ -89,9 +89,16 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
                if (usb_endpoint_xfer_isoc(desc) ||
                                usb_endpoint_xfer_int(desc)) {
                        interval = desc->bInterval;
-                       interval = clamp_val(interval, 1, 16) - 1;
+                       interval = clamp_val(interval, 1, 16);
                        mult = usb_endpoint_maxp_mult(desc) - 1;
                }
+               break;
+       case USB_SPEED_FULL:
+               if (usb_endpoint_xfer_isoc(desc))
+                       interval = clamp_val(desc->bInterval, 1, 16);
+               else if (usb_endpoint_xfer_int(desc))
+                       interval = clamp_val(desc->bInterval, 1, 255);
+
                break;
        default:
                break; /*others are ignored */
@@ -235,6 +242,7 @@ struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
        mreq->request.dma = DMA_ADDR_INVALID;
        mreq->epnum = mep->epnum;
        mreq->mep = mep;
+       INIT_LIST_HEAD(&mreq->list);
        trace_mtu3_alloc_request(mreq);
 
        return &mreq->request;
index 3f414f9..2ea3157 100644 (file)
@@ -273,6 +273,8 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
                        gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
        }
 
+       /* prevent reorder, make sure GPD's HWO is set last */
+       mb();
        gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
 
        mreq->gpd = gpd;
@@ -306,6 +308,8 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
        gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
        ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
        gpd->dw3_info = cpu_to_le32(ext_addr);
+       /* prevent reorder, make sure GPD's HWO is set last */
+       mb();
        gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
 
        mreq->gpd = gpd;
@@ -445,7 +449,8 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
                return;
        }
        mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
-
+       /* prevent reorder, make sure GPD's HWO is set last */
+       mb();
        /* by pass the current GDP */
        gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
 
index 7705328..8a60c0d 100644 (file)
@@ -1635,6 +1635,8 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
 
        /*  2 banks of GPIO - One for the pins taken from each serial port */
        if (intf_num == 0) {
+               priv->gc.ngpio = 2;
+
                if (mode.eci == CP210X_PIN_MODE_MODEM) {
                        /* mark all GPIOs of this interface as reserved */
                        priv->gpio_altfunc = 0xff;
@@ -1645,8 +1647,9 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
                priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
                                                CP210X_ECI_GPIO_MODE_MASK) >>
                                                CP210X_ECI_GPIO_MODE_OFFSET);
-               priv->gc.ngpio = 2;
        } else if (intf_num == 1) {
+               priv->gc.ngpio = 3;
+
                if (mode.sci == CP210X_PIN_MODE_MODEM) {
                        /* mark all GPIOs of this interface as reserved */
                        priv->gpio_altfunc = 0xff;
@@ -1657,7 +1660,6 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
                priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
                                                CP210X_SCI_GPIO_MODE_MASK) >>
                                                CP210X_SCI_GPIO_MODE_OFFSET);
-               priv->gc.ngpio = 3;
        } else {
                return -ENODEV;
        }
index 546fce4..42420bf 100644 (file)
@@ -1219,6 +1219,14 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff),    /* Telit LN920 (ECM) */
          .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff),    /* Telit FN990 (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff),    /* Telit FN990 (MBIM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff),    /* Telit FN990 (RNDIS) */
+         .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff),    /* Telit FN990 (ECM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
index 6010b99..59d4fa2 100644 (file)
@@ -324,6 +324,7 @@ struct tcpm_port {
 
        bool attached;
        bool connected;
+       bool registered;
        bool pd_supported;
        enum typec_port_type port_type;
 
@@ -6291,7 +6292,8 @@ static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
 {
        struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
 
-       kthread_queue_work(port->wq, &port->state_machine);
+       if (port->registered)
+               kthread_queue_work(port->wq, &port->state_machine);
        return HRTIMER_NORESTART;
 }
 
@@ -6299,7 +6301,8 @@ static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *time
 {
        struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
 
-       kthread_queue_work(port->wq, &port->vdm_state_machine);
+       if (port->registered)
+               kthread_queue_work(port->wq, &port->vdm_state_machine);
        return HRTIMER_NORESTART;
 }
 
@@ -6307,7 +6310,8 @@ static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
 {
        struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
 
-       kthread_queue_work(port->wq, &port->enable_frs);
+       if (port->registered)
+               kthread_queue_work(port->wq, &port->enable_frs);
        return HRTIMER_NORESTART;
 }
 
@@ -6315,7 +6319,8 @@ static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
 {
        struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
 
-       kthread_queue_work(port->wq, &port->send_discover_work);
+       if (port->registered)
+               kthread_queue_work(port->wq, &port->send_discover_work);
        return HRTIMER_NORESTART;
 }
 
@@ -6403,6 +6408,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
        typec_port_register_altmodes(port->typec_port,
                                     &tcpm_altmode_ops, port,
                                     port->port_altmode, ALTMODE_DISCOVERY_MAX);
+       port->registered = true;
 
        mutex_lock(&port->lock);
        tcpm_init(port);
@@ -6424,6 +6430,9 @@ void tcpm_unregister_port(struct tcpm_port *port)
 {
        int i;
 
+       port->registered = false;
+       kthread_destroy_worker(port->wq);
+
        hrtimer_cancel(&port->send_discover_timer);
        hrtimer_cancel(&port->enable_frs_timer);
        hrtimer_cancel(&port->vdm_state_machine_timer);
@@ -6435,7 +6444,6 @@ void tcpm_unregister_port(struct tcpm_port *port)
        typec_unregister_port(port->typec_port);
        usb_role_switch_put(port->role_sw);
        tcpm_debugfs_exit(port);
-       kthread_destroy_worker(port->wq);
 }
 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
 
index 6aa2838..08561bf 100644 (file)
@@ -1150,7 +1150,9 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
                ret = 0;
        }
 
-       if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) == UCSI_CONSTAT_PWR_OPMODE_PD) {
+       if (con->partner &&
+           UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
+           UCSI_CONSTAT_PWR_OPMODE_PD) {
                ucsi_get_src_pdos(con);
                ucsi_check_altmodes(con);
        }
index 7332a74..09bbe53 100644 (file)
@@ -404,7 +404,8 @@ static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *m
                goto msg_err;
 
        while (mdev->id_table[i].device) {
-               supported_classes |= BIT(mdev->id_table[i].device);
+               if (mdev->id_table[i].device <= 63)
+                       supported_classes |= BIT_ULL(mdev->id_table[i].device);
                i++;
        }
 
index c9204c6..eddcb64 100644 (file)
@@ -655,7 +655,8 @@ static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
 {
        struct vduse_dev *dev = vdpa_to_vduse(vdpa);
 
-       if (len > dev->config_size - offset)
+       if (offset > dev->config_size ||
+           len > dev->config_size - offset)
                return;
 
        memcpy(buf, dev->config + offset, len);
@@ -975,7 +976,8 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
                        break;
 
                ret = -EINVAL;
-               if (config.length == 0 ||
+               if (config.offset > dev->config_size ||
+                   config.length == 0 ||
                    config.length > dev->config_size - config.offset)
                        break;
 
index 29cced1..e3c4f05 100644 (file)
@@ -197,7 +197,7 @@ static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
        struct vdpa_device *vdpa = v->vdpa;
        long size = vdpa->config->get_config_size(vdpa);
 
-       if (c->len == 0)
+       if (c->len == 0 || c->off > size)
                return -EINVAL;
 
        if (c->len > size - c->off)
index 826175a..0fa7ede 100644 (file)
@@ -1762,6 +1762,53 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
 }
 EXPORT_SYMBOL(remove_conflicting_framebuffers);
 
+/**
+ * is_firmware_framebuffer - detect if firmware-configured framebuffer matches
+ * @a: memory range, users of which are to be checked
+ *
+ * This function checks framebuffer devices (initialized by firmware/bootloader)
+ * which use memory range described by @a. If @a matches, the function returns
+ * true, otherwise false.
+ */
+bool is_firmware_framebuffer(struct apertures_struct *a)
+{
+       bool do_free = false;
+       bool found = false;
+       int i;
+
+       if (!a) {
+               a = alloc_apertures(1);
+               if (!a)
+                       return false;
+
+               a->ranges[0].base = 0;
+               a->ranges[0].size = ~0;
+               do_free = true;
+       }
+
+       mutex_lock(&registration_lock);
+       /* check all firmware fbs and report whether any base addr overlaps @a */
+       for_each_registered_fb(i) {
+               struct apertures_struct *gen_aper;
+
+               if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
+                       continue;
+
+               gen_aper = registered_fb[i]->apertures;
+               if (fb_do_apertures_overlap(gen_aper, a)) {
+                       found = true;
+                       break;
+               }
+       }
+       mutex_unlock(&registration_lock);
+
+       if (do_free)
+               kfree(a);
+
+       return found;
+}
+EXPORT_SYMBOL(is_firmware_framebuffer);
+
 /**
  * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
  * @pdev: PCI device
index 8939612..6894ccb 100644 (file)
@@ -886,8 +886,9 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
                        goto put_pages;
                }
 
-               gup_rc = get_user_pages(mem_region.userspace_addr + memory_size, 1, FOLL_GET,
-                                       ne_mem_region->pages + i, NULL);
+               gup_rc = get_user_pages_unlocked(mem_region.userspace_addr + memory_size, 1,
+                                                ne_mem_region->pages + i, FOLL_GET);
+
                if (gup_rc < 0) {
                        rc = gup_rc;
 
index 6d2614e..028b05d 100644 (file)
@@ -268,7 +268,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev)
        size_t max_segment_size = SIZE_MAX;
 
        if (vring_use_dma_api(vdev))
-               max_segment_size = dma_max_mapping_size(&vdev->dev);
+               max_segment_size = dma_max_mapping_size(vdev->dev.parent);
 
        return max_segment_size;
 }
index a78704a..46d9295 100644 (file)
@@ -1251,6 +1251,12 @@ int bind_evtchn_to_irq(evtchn_port_t evtchn)
 }
 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
 
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
+{
+       return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
+
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
        struct evtchn_bind_ipi bind_ipi;
index cb6ad61..afe4b80 100644 (file)
@@ -514,8 +514,9 @@ static void afs_add_open_mmap(struct afs_vnode *vnode)
        if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
                down_write(&vnode->volume->cell->fs_open_mmaps_lock);
 
-               list_add_tail(&vnode->cb_mmap_link,
-                             &vnode->volume->cell->fs_open_mmaps);
+               if (list_empty(&vnode->cb_mmap_link))
+                       list_add_tail(&vnode->cb_mmap_link,
+                                     &vnode->volume->cell->fs_open_mmaps);
 
                up_write(&vnode->volume->cell->fs_open_mmaps_lock);
        }
index d110def..34c6872 100644 (file)
@@ -667,6 +667,7 @@ static void afs_i_init_once(void *_vnode)
        INIT_LIST_HEAD(&vnode->pending_locks);
        INIT_LIST_HEAD(&vnode->granted_locks);
        INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
+       INIT_LIST_HEAD(&vnode->cb_mmap_link);
        seqlock_init(&vnode->cb_lock);
 }
 
index 9c81cf6..f6f1cbf 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,8 +181,9 @@ struct poll_iocb {
        struct file             *file;
        struct wait_queue_head  *head;
        __poll_t                events;
-       bool                    done;
        bool                    cancelled;
+       bool                    work_scheduled;
+       bool                    work_need_resched;
        struct wait_queue_entry wait;
        struct work_struct      work;
 };
@@ -1619,6 +1620,51 @@ static void aio_poll_put_work(struct work_struct *work)
        iocb_put(iocb);
 }
 
+/*
+ * Safely lock the waitqueue which the request is on, synchronizing with the
+ * case where the ->poll() provider decides to free its waitqueue early.
+ *
+ * Returns true on success, meaning that req->head->lock was locked, req->wait
+ * is on req->head, and an RCU read lock was taken.  Returns false if the
+ * request was already removed from its waitqueue (which might no longer exist).
+ */
+static bool poll_iocb_lock_wq(struct poll_iocb *req)
+{
+       wait_queue_head_t *head;
+
+       /*
+        * While we hold the waitqueue lock and the waitqueue is nonempty,
+        * wake_up_pollfree() will wait for us.  However, taking the waitqueue
+        * lock in the first place can race with the waitqueue being freed.
+        *
+        * We solve this as eventpoll does: by taking advantage of the fact that
+        * all users of wake_up_pollfree() will RCU-delay the actual free.  If
+        * we enter rcu_read_lock() and see that the pointer to the queue is
+        * non-NULL, we can then lock it without the memory being freed out from
+        * under us, then check whether the request is still on the queue.
+        *
+        * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+        * case the caller deletes the entry from the queue, leaving it empty.
+        * In that case, only RCU prevents the queue memory from being freed.
+        */
+       rcu_read_lock();
+       head = smp_load_acquire(&req->head);
+       if (head) {
+               spin_lock(&head->lock);
+               if (!list_empty(&req->wait.entry))
+                       return true;
+               spin_unlock(&head->lock);
+       }
+       rcu_read_unlock();
+       return false;
+}
+
+static void poll_iocb_unlock_wq(struct poll_iocb *req)
+{
+       spin_unlock(&req->head->lock);
+       rcu_read_unlock();
+}
+
 static void aio_poll_complete_work(struct work_struct *work)
 {
        struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,14 +1684,27 @@ static void aio_poll_complete_work(struct work_struct *work)
         * avoid further branches in the fast path.
         */
        spin_lock_irq(&ctx->ctx_lock);
-       if (!mask && !READ_ONCE(req->cancelled)) {
-               add_wait_queue(req->head, &req->wait);
-               spin_unlock_irq(&ctx->ctx_lock);
-               return;
-       }
+       if (poll_iocb_lock_wq(req)) {
+               if (!mask && !READ_ONCE(req->cancelled)) {
+                       /*
+                        * The request isn't actually ready to be completed yet.
+                        * Reschedule completion if another wakeup came in.
+                        */
+                       if (req->work_need_resched) {
+                               schedule_work(&req->work);
+                               req->work_need_resched = false;
+                       } else {
+                               req->work_scheduled = false;
+                       }
+                       poll_iocb_unlock_wq(req);
+                       spin_unlock_irq(&ctx->ctx_lock);
+                       return;
+               }
+               list_del_init(&req->wait.entry);
+               poll_iocb_unlock_wq(req);
+       } /* else, POLLFREE has freed the waitqueue, so we must complete */
        list_del_init(&iocb->ki_list);
        iocb->ki_res.res = mangle_poll(mask);
-       req->done = true;
        spin_unlock_irq(&ctx->ctx_lock);
 
        iocb_put(iocb);
@@ -1657,13 +1716,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
        struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
        struct poll_iocb *req = &aiocb->poll;
 
-       spin_lock(&req->head->lock);
-       WRITE_ONCE(req->cancelled, true);
-       if (!list_empty(&req->wait.entry)) {
-               list_del_init(&req->wait.entry);
-               schedule_work(&aiocb->poll.work);
-       }
-       spin_unlock(&req->head->lock);
+       if (poll_iocb_lock_wq(req)) {
+               WRITE_ONCE(req->cancelled, true);
+               if (!req->work_scheduled) {
+                       schedule_work(&aiocb->poll.work);
+                       req->work_scheduled = true;
+               }
+               poll_iocb_unlock_wq(req);
+       } /* else, the request was force-cancelled by POLLFREE already */
 
        return 0;
 }
@@ -1680,21 +1740,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        if (mask && !(mask & req->events))
                return 0;
 
-       list_del_init(&req->wait.entry);
-
-       if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+       /*
+        * Complete the request inline if possible.  This requires that three
+        * conditions be met:
+        *   1. An event mask must have been passed.  If a plain wakeup was done
+        *      instead, then mask == 0 and we have to call vfs_poll() to get
+        *      the events, so inline completion isn't possible.
+        *   2. The completion work must not have already been scheduled.
+        *   3. ctx_lock must not be busy.  We have to use trylock because we
+        *      already hold the waitqueue lock, so this inverts the normal
+        *      locking order.  Use irqsave/irqrestore because not all
+        *      filesystems (e.g. fuse) call this function with IRQs disabled,
+        *      yet IRQs have to be disabled before ctx_lock is obtained.
+        */
+       if (mask && !req->work_scheduled &&
+           spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
                struct kioctx *ctx = iocb->ki_ctx;
 
-               /*
-                * Try to complete the iocb inline if we can. Use
-                * irqsave/irqrestore because not all filesystems (e.g. fuse)
-                * call this function with IRQs disabled and because IRQs
-                * have to be disabled before ctx_lock is obtained.
-                */
+               list_del_init(&req->wait.entry);
                list_del(&iocb->ki_list);
                iocb->ki_res.res = mangle_poll(mask);
-               req->done = true;
-               if (iocb->ki_eventfd && eventfd_signal_allowed()) {
+               if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
                        iocb = NULL;
                        INIT_WORK(&req->work, aio_poll_put_work);
                        schedule_work(&req->work);
@@ -1703,7 +1769,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                if (iocb)
                        iocb_put(iocb);
        } else {
-               schedule_work(&req->work);
+               /*
+                * Schedule the completion work if needed.  If it was already
+                * scheduled, record that another wakeup came in.
+                *
+                * Don't remove the request from the waitqueue here, as it might
+                * not actually be complete yet (we won't know until vfs_poll()
+                * is called), and we must not miss any wakeups.  POLLFREE is an
+                * exception to this; see below.
+                */
+               if (req->work_scheduled) {
+                       req->work_need_resched = true;
+               } else {
+                       schedule_work(&req->work);
+                       req->work_scheduled = true;
+               }
+
+               /*
+                * If the waitqueue is being freed early but we can't complete
+                * the request inline, we have to tear down the request as best
+                * we can.  That means immediately removing the request from its
+                * waitqueue and preventing all further accesses to the
+                * waitqueue via the request.  We also need to schedule the
+                * completion work (done above).  Also mark the request as
+                * cancelled, to potentially skip an unneeded call to ->poll().
+                */
+               if (mask & POLLFREE) {
+                       WRITE_ONCE(req->cancelled, true);
+                       list_del_init(&req->wait.entry);
+
+                       /*
+                        * Careful: this *must* be the last step, since as soon
+                        * as req->head is NULL'ed out, the request can be
+                        * completed and freed, since aio_poll_complete_work()
+                        * will no longer need to take the waitqueue lock.
+                        */
+                       smp_store_release(&req->head, NULL);
+               }
        }
        return 1;
 }
@@ -1711,6 +1813,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 struct aio_poll_table {
        struct poll_table_struct        pt;
        struct aio_kiocb                *iocb;
+       bool                            queued;
        int                             error;
 };
 
@@ -1721,11 +1824,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
        struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
 
        /* multiple wait queues per file are not supported */
-       if (unlikely(pt->iocb->poll.head)) {
+       if (unlikely(pt->queued)) {
                pt->error = -EINVAL;
                return;
        }
 
+       pt->queued = true;
        pt->error = 0;
        pt->iocb->poll.head = head;
        add_wait_queue(head, &pt->iocb->poll.wait);
@@ -1750,12 +1854,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
 
        req->head = NULL;
-       req->done = false;
        req->cancelled = false;
+       req->work_scheduled = false;
+       req->work_need_resched = false;
 
        apt.pt._qproc = aio_poll_queue_proc;
        apt.pt._key = req->events;
        apt.iocb = aiocb;
+       apt.queued = false;
        apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
 
        /* initialized the list so that we can do list_empty checks */
@@ -1764,23 +1870,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 
        mask = vfs_poll(req->file, &apt.pt) & req->events;
        spin_lock_irq(&ctx->ctx_lock);
-       if (likely(req->head)) {
-               spin_lock(&req->head->lock);
-               if (unlikely(list_empty(&req->wait.entry))) {
-                       if (apt.error)
+       if (likely(apt.queued)) {
+               bool on_queue = poll_iocb_lock_wq(req);
+
+               if (!on_queue || req->work_scheduled) {
+                       /*
+                        * aio_poll_wake() already either scheduled the async
+                        * completion work, or completed the request inline.
+                        */
+                       if (apt.error) /* unsupported case: multiple queues */
                                cancel = true;
                        apt.error = 0;
                        mask = 0;
                }
                if (mask || apt.error) {
+                       /* Steal to complete synchronously. */
                        list_del_init(&req->wait.entry);
                } else if (cancel) {
+                       /* Cancel if possible (may be too late though). */
                        WRITE_ONCE(req->cancelled, true);
-               } else if (!req->done) { /* actually waiting for an event */
+               } else if (on_queue) {
+                       /*
+                        * Actually waiting for an event, so add the request to
+                        * active_reqs so that it can be cancelled if needed.
+                        */
                        list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
                        aiocb->ki_cancel = aio_poll_cancel;
                }
-               spin_unlock(&req->head->lock);
+               if (on_queue)
+                       poll_iocb_unlock_wq(req);
        }
        if (mask) { /* no async, we'd stolen it */
                aiocb->ki_res.res = mangle_poll(mask);
index c3983bd..f704339 100644 (file)
@@ -463,8 +463,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                BUG_ON(ret < 0);
                rcu_assign_pointer(root->node, cow);
 
-               btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref);
+               btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+                                     parent_start, last_ref);
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
        } else {
@@ -485,8 +485,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                                return ret;
                        }
                }
-               btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref);
+               btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+                                     parent_start, last_ref);
        }
        if (unlock_orig)
                btrfs_tree_unlock(buf);
@@ -927,7 +927,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                free_extent_buffer(mid);
 
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1);
+               btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
                /* once for the root ptr */
                free_extent_buffer_stale(mid);
                return 0;
@@ -986,7 +986,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                        btrfs_tree_unlock(right);
                        del_ptr(root, path, level + 1, pslot + 1);
                        root_sub_used(root, right->len);
-                       btrfs_free_tree_block(trans, root, right, 0, 1);
+                       btrfs_free_tree_block(trans, btrfs_root_id(root), right,
+                                             0, 1);
                        free_extent_buffer_stale(right);
                        right = NULL;
                } else {
@@ -1031,7 +1032,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                btrfs_tree_unlock(mid);
                del_ptr(root, path, level + 1, pslot);
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1);
+               btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
                free_extent_buffer_stale(mid);
                mid = NULL;
        } else {
@@ -4032,7 +4033,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
        root_sub_used(root, leaf->len);
 
        atomic_inc(&leaf->refs);
-       btrfs_free_tree_block(trans, root, leaf, 0, 1);
+       btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
        free_extent_buffer_stale(leaf);
 }
 /*
index 7553e9d..5fe5ecc 100644 (file)
@@ -2257,6 +2257,11 @@ static inline bool btrfs_root_dead(const struct btrfs_root *root)
        return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
 }
 
+static inline u64 btrfs_root_id(const struct btrfs_root *root)
+{
+       return root->root_key.objectid;
+}
+
 /* struct btrfs_root_backup */
 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup,
                   tree_root, 64);
@@ -2719,7 +2724,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                                             u64 empty_size,
                                             enum btrfs_lock_nesting nest);
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root,
+                          u64 root_id,
                           struct extent_buffer *buf,
                           u64 parent, int last_ref);
 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
index 2059d15..40c4d6b 100644 (file)
@@ -143,10 +143,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,
 
        /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
        ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
-       if (ret < 0)
+       if (ret < 0) {
                btrfs_free_reserved_data_space_noquota(fs_info, len);
-       else
+               extent_changeset_free(*reserved);
+               *reserved = NULL;
+       } else {
                ret = 0;
+       }
        return ret;
 }
 
@@ -452,8 +455,11 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
        if (ret < 0)
                return ret;
        ret = btrfs_delalloc_reserve_metadata(inode, len);
-       if (ret < 0)
+       if (ret < 0) {
                btrfs_free_reserved_data_space(inode, *reserved, start, len);
+               extent_changeset_free(*reserved);
+               *reserved = NULL;
+       }
        return ret;
 }
 
index 514ead6..b3f2e22 100644 (file)
@@ -1732,6 +1732,14 @@ again:
        }
        return root;
 fail:
+       /*
+        * If our caller provided us an anonymous device, then it is the
+        * caller's responsibility to free it in case we fail. So we have to set our
+        * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
+        * and once again by our caller.
+        */
+       if (anon_dev)
+               root->anon_dev = 0;
        btrfs_put_root(root);
        return ERR_PTR(ret);
 }
index 3fd736a..25ef6e3 100644 (file)
@@ -3275,20 +3275,20 @@ out_delayed_unlock:
 }
 
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root,
+                          u64 root_id,
                           struct extent_buffer *buf,
                           u64 parent, int last_ref)
 {
-       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_ref generic_ref = { 0 };
        int ret;
 
        btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
                               buf->start, buf->len, parent);
        btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
-                           root->root_key.objectid, 0, false);
+                           root_id, 0, false);
 
-       if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+       if (root_id != BTRFS_TREE_LOG_OBJECTID) {
                btrfs_ref_tree_mod(fs_info, &generic_ref);
                ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
                BUG_ON(ret); /* -ENOMEM */
@@ -3298,7 +3298,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                struct btrfs_block_group *cache;
                bool must_pin = false;
 
-               if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+               if (root_id != BTRFS_TREE_LOG_OBJECTID) {
                        ret = check_ref_cleanup(trans, buf->start);
                        if (!ret) {
                                btrfs_redirty_list_add(trans->transaction, buf);
@@ -5472,7 +5472,8 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                        goto owner_mismatch;
        }
 
-       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
+       btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
+                             wc->refs[level] == 1);
 out:
        wc->refs[level] = 0;
        wc->flags[level] = 0;
@@ -6051,6 +6052,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
        int dev_ret = 0;
        int ret = 0;
 
+       if (range->start == U64_MAX)
+               return -EINVAL;
+
        /*
         * Check range overflow if range->len is set.
         * The default range->len is U64_MAX.
index 4e03a6d..9234d96 100644 (file)
@@ -4313,6 +4313,20 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
        if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
                return;
 
+       /*
+        * A read may stumble upon this buffer later, make sure that it gets an
+        * error and knows there was an error.
+        */
+       clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+
+       /*
+        * We need to set the mapping with the io error as well because a write
+        * error will flip the file system readonly, and then syncfs() will
+        * return a 0 because we are readonly if we don't modify the err seq for
+        * the superblock.
+        */
+       mapping_set_error(page->mapping, -EIO);
+
        /*
         * If we error out, we should add back the dirty_metadata_bytes
         * to make it consistent.
@@ -6597,6 +6611,14 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
        if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
                return 0;
 
+       /*
+        * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
+        * operation, which could potentially still be in flight.  In this case
+        * we simply want to return an error.
+        */
+       if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
+               return -EIO;
+
        if (eb->fs_info->sectorsize < PAGE_SIZE)
                return read_extent_buffer_subpage(eb, wait, mirror_num);
 
index a33bca9..3abec44 100644 (file)
@@ -1256,8 +1256,8 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
        btrfs_tree_lock(free_space_root->node);
        btrfs_clean_tree_block(free_space_root->node);
        btrfs_tree_unlock(free_space_root->node);
-       btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
-                             0, 1);
+       btrfs_free_tree_block(trans, btrfs_root_id(free_space_root),
+                             free_space_root->node, 0, 1);
 
        btrfs_put_root(free_space_root);
 
index 92138ac..edfecfe 100644 (file)
@@ -617,11 +617,13 @@ static noinline int create_subvol(struct user_namespace *mnt_userns,
                 * Since we don't abort the transaction in this case, free the
                 * tree block so that we don't leak space and leave the
                 * filesystem in an inconsistent state (an extent item in the
-                * extent tree without backreferences). Also no need to have
-                * the tree block locked since it is not in any tree at this
-                * point, so no other task can find it and use it.
+                * extent tree with a backreference for a root that does not
+                * exist).
                 */
-               btrfs_free_tree_block(trans, root, leaf, 0, 1);
+               btrfs_tree_lock(leaf);
+               btrfs_clean_tree_block(leaf);
+               btrfs_tree_unlock(leaf);
+               btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
                free_extent_buffer(leaf);
                goto fail;
        }
@@ -3187,10 +3189,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
                return -EPERM;
 
        vol_args = memdup_user(arg, sizeof(*vol_args));
-       if (IS_ERR(vol_args)) {
-               ret = PTR_ERR(vol_args);
-               goto out;
-       }
+       if (IS_ERR(vol_args))
+               return PTR_ERR(vol_args);
 
        if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
                ret = -EOPNOTSUPP;
index db680f5..6c037f1 100644 (file)
@@ -1219,7 +1219,8 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
        btrfs_tree_lock(quota_root->node);
        btrfs_clean_tree_block(quota_root->node);
        btrfs_tree_unlock(quota_root->node);
-       btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
+       btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
+                             quota_root->node, 0, 1);
 
        btrfs_put_root(quota_root);
 
index 12ceb14..d201663 100644 (file)
@@ -334,7 +334,8 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
        key.offset = ref_id;
 again:
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
-       BUG_ON(ret < 0);
+       if (ret < 0)
+               goto out;
        if (ret == 0) {
                leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
index 8ab33ca..6993dcd 100644 (file)
@@ -1181,6 +1181,7 @@ again:
                                             parent_objectid, victim_name,
                                             victim_name_len);
                        if (ret < 0) {
+                               kfree(victim_name);
                                return ret;
                        } else if (!ret) {
                                ret = -ENOENT;
@@ -2908,6 +2909,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
                                                     path->nodes[*level]->len);
                                        if (ret)
                                                return ret;
+                                       btrfs_redirty_list_add(trans->transaction,
+                                                              next);
                                } else {
                                        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
                                                clear_extent_buffer_dirty(next);
@@ -2988,6 +2991,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
                                                next->start, next->len);
                                if (ret)
                                        goto out;
+                               btrfs_redirty_list_add(trans->transaction, next);
                        } else {
                                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
                                        clear_extent_buffer_dirty(next);
@@ -3438,8 +3442,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
                          EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
        extent_io_tree_release(&log->log_csum_range);
 
-       if (trans && log->node)
-               btrfs_redirty_list_add(trans->transaction, log->node);
        btrfs_put_root(log);
 }
 
@@ -3976,6 +3978,7 @@ search:
                        goto done;
                }
                if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
+                       ctx->last_dir_item_offset = min_key.offset;
                        ret = overwrite_item(trans, log, dst_path,
                                             path->nodes[0], path->slots[0],
                                             &min_key);
index 0997e3c..fd0ced8 100644 (file)
@@ -1370,8 +1370,10 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
 
        bytenr_orig = btrfs_sb_offset(0);
        ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
-       if (ret)
-               return ERR_PTR(ret);
+       if (ret) {
+               device = ERR_PTR(ret);
+               goto error_bdev_put;
+       }
 
        disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
        if (IS_ERR(disk_super)) {
index 67d932d..678a294 100644 (file)
@@ -1860,6 +1860,7 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
        block_group->alloc_offset = block_group->zone_capacity;
        block_group->free_space_ctl->free_space = 0;
        btrfs_clear_treelog_bg(block_group);
+       btrfs_clear_data_reloc_bg(block_group);
        spin_unlock(&block_group->lock);
 
        ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
@@ -1942,6 +1943,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 len
        ASSERT(block_group->alloc_offset == block_group->zone_capacity);
        ASSERT(block_group->free_space_ctl->free_space == 0);
        btrfs_clear_treelog_bg(block_group);
+       btrfs_clear_data_reloc_bg(block_group);
        spin_unlock(&block_group->lock);
 
        map = block_group->physical_map;
index b9460b6..c447fa2 100644 (file)
@@ -4350,7 +4350,7 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb);
        int bits = (fmode << 1) | 1;
-       bool is_opened = false;
+       bool already_opened = false;
        int i;
 
        if (count == 1)
@@ -4358,19 +4358,19 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
 
        spin_lock(&ci->i_ceph_lock);
        for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
-               if (bits & (1 << i))
-                       ci->i_nr_by_mode[i] += count;
-
                /*
-                * If any of the mode ref is larger than 1,
+                * If any of the mode ref is larger than 0,
                 * that means it has been already opened by
                 * others. Just skip checking the PIN ref.
                 */
-               if (i && ci->i_nr_by_mode[i] > 1)
-                       is_opened = true;
+               if (i && ci->i_nr_by_mode[i])
+                       already_opened = true;
+
+               if (bits & (1 << i))
+                       ci->i_nr_by_mode[i] += count;
        }
 
-       if (!is_opened)
+       if (!already_opened)
                percpu_counter_inc(&mdsc->metric.opened_inodes);
        spin_unlock(&ci->i_ceph_lock);
 }
index 02a0a0f..c138e81 100644 (file)
@@ -605,13 +605,25 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
        in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
        in.cap.flags = CEPH_CAP_FLAG_AUTH;
        in.ctime = in.mtime = in.atime = iinfo.btime;
-       in.mode = cpu_to_le32((u32)mode);
        in.truncate_seq = cpu_to_le32(1);
        in.truncate_size = cpu_to_le64(-1ULL);
        in.xattr_version = cpu_to_le64(1);
        in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
-       in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
-                               dir->i_gid : current_fsgid()));
+       if (dir->i_mode & S_ISGID) {
+               in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
+
+               /* Directories always inherit the setgid bit. */
+               if (S_ISDIR(mode))
+                       mode |= S_ISGID;
+               else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+                        !in_group_p(dir->i_gid) &&
+                        !capable_wrt_inode_uidgid(&init_user_ns, dir, CAP_FSETID))
+                       mode &= ~S_ISGID;
+       } else {
+               in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
+       }
+       in.mode = cpu_to_le32((u32)mode);
+
        in.nlink = cpu_to_le32(1);
        in.max_size = cpu_to_le64(lo->stripe_unit);
 
@@ -847,7 +859,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
        ssize_t ret;
        u64 off = iocb->ki_pos;
        u64 len = iov_iter_count(to);
-       u64 i_size;
+       u64 i_size = i_size_read(inode);
 
        dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
index 250aad3..c30eefc 100644 (file)
@@ -3683,7 +3683,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
        struct ceph_pagelist *pagelist = recon_state->pagelist;
        struct dentry *dentry;
        char *path;
-       int pathlen, err;
+       int pathlen = 0, err;
        u64 pathbase;
        u64 snap_follows;
 
@@ -3703,7 +3703,6 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
                }
        } else {
                path = NULL;
-               pathlen = 0;
                pathbase = 0;
        }
 
index 18448db..1060164 100644 (file)
@@ -3064,6 +3064,13 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
            (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
                cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
 
+       /*
+        * The cookie is initialized from volume info returned above.
+        * Inside cifs_fscache_get_super_cookie it checks
+        * that we do not get super cookie twice.
+        */
+       cifs_fscache_get_super_cookie(tcon);
+
 out:
        mnt_ctx->server = server;
        mnt_ctx->ses = ses;
index 6a179ae..e3ed25d 100644 (file)
@@ -434,6 +434,42 @@ out:
        return rc;
 }
 
+/*
+ * Remove duplicate path delimiters. Windows is supposed to do that
+ * but there are some bugs that prevent rename from working if there are
+ * multiple delimiters.
+ *
+ * Returns a sanitized duplicate of @path. The caller is responsible for
+ * cleaning up the original.
+ */
+#define IS_DELIM(c) ((c) == '/' || (c) == '\\')
+static char *sanitize_path(char *path)
+{
+       char *cursor1 = path, *cursor2 = path;
+
+       /* skip all prepended delimiters */
+       while (IS_DELIM(*cursor1))
+               cursor1++;
+
+       /* copy the first letter */
+       *cursor2 = *cursor1;
+
+       /* copy the remainder... */
+       while (*(cursor1++)) {
+               /* ... skipping all duplicated delimiters */
+               if (IS_DELIM(*cursor1) && IS_DELIM(*cursor2))
+                       continue;
+               *(++cursor2) = *cursor1;
+       }
+
+       /* if the last character is a delimiter, skip it */
+       if (IS_DELIM(*(cursor2 - 1)))
+               cursor2--;
+
+       *(cursor2) = '\0';
+       return kstrdup(path, GFP_KERNEL);
+}
+
 /*
  * Parse a devname into substrings and populate the ctx->UNC and ctx->prepath
  * fields with the result. Returns 0 on success and an error otherwise
@@ -493,7 +529,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
        if (!*pos)
                return 0;
 
-       ctx->prepath = kstrdup(pos, GFP_KERNEL);
+       ctx->prepath = sanitize_path(pos);
        if (!ctx->prepath)
                return -ENOMEM;
 
index 96d083d..279622e 100644 (file)
@@ -1356,11 +1356,6 @@ iget_no_retry:
                goto out;
        }
 
-#ifdef CONFIG_CIFS_FSCACHE
-       /* populate tcon->resource_id */
-       tcon->resource_id = CIFS_I(inode)->uniqueid;
-#endif
-
        if (rc && tcon->pipe) {
                cifs_dbg(FYI, "ipc connection - fake read inode\n");
                spin_lock(&inode->i_lock);
@@ -1375,14 +1370,6 @@ iget_no_retry:
                iget_failed(inode);
                inode = ERR_PTR(rc);
        }
-
-       /*
-        * The cookie is initialized from volume info returned above.
-        * Inside cifs_fscache_get_super_cookie it checks
-        * that we do not get super cookie twice.
-        */
-       cifs_fscache_get_super_cookie(tcon);
-
 out:
        kfree(path);
        free_xid(xid);
index af63548..035dc3e 100644 (file)
@@ -590,8 +590,8 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
 {
        unsigned int tioffset; /* challenge message target info area */
        unsigned int tilen; /* challenge message target info area length  */
-
        CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
+       __u32 server_flags;
 
        if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
                cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len);
@@ -609,12 +609,37 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
                return -EINVAL;
        }
 
+       server_flags = le32_to_cpu(pblob->NegotiateFlags);
+       cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__,
+                ses->ntlmssp->client_flags, server_flags);
+
+       if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) &&
+           (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) {
+               cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n",
+                        __func__);
+               return -EINVAL;
+       }
+       if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) {
+               cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__);
+               return -EINVAL;
+       }
+       if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) {
+               cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n",
+                        __func__);
+               return -EOPNOTSUPP;
+       }
+       if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+           !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH))
+               pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n",
+                            __func__);
+
+       ses->ntlmssp->server_flags = server_flags;
+
        memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
-       /* BB we could decode pblob->NegotiateFlags; some may be useful */
        /* In particular we can examine sign flags */
        /* BB spec says that if AvId field of MsvAvTimestamp is populated then
                we must set the MIC field of the AUTHENTICATE_MESSAGE */
-       ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
+
        tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
        tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
        if (tioffset > blob_len || tioffset + tilen > blob_len) {
@@ -721,13 +746,13 @@ int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
        flags = NTLMSSP_NEGOTIATE_56 |  NTLMSSP_REQUEST_TARGET |
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
                NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
-               NTLMSSP_NEGOTIATE_SEAL;
-       if (server->sign)
-               flags |= NTLMSSP_NEGOTIATE_SIGN;
+               NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
+               NTLMSSP_NEGOTIATE_SIGN;
        if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
                flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
 
        tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE);
+       ses->ntlmssp->client_flags = flags;
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
 
        /* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */
@@ -779,15 +804,8 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
        memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
        sec_blob->MessageType = NtLmAuthenticate;
 
-       flags = NTLMSSP_NEGOTIATE_56 |
-               NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
-               NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
-               NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
-               NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
-       if (ses->server->sign)
-               flags |= NTLMSSP_NEGOTIATE_SIGN;
-       if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
-               flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+       flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
+               NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
 
        tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -834,9 +852,9 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
                                      *pbuffer, &tmp,
                                      nls_cp);
 
-       if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
-               (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
-                       && !calc_seckey(ses)) {
+       if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+           (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) &&
+           !calc_seckey(ses)) {
                memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
                sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
index ad4a8bf..97d212a 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -841,28 +841,68 @@ void do_close_on_exec(struct files_struct *files)
        spin_unlock(&files->file_lock);
 }
 
-static struct file *__fget_files(struct files_struct *files, unsigned int fd,
-                                fmode_t mask, unsigned int refs)
+static inline struct file *__fget_files_rcu(struct files_struct *files,
+       unsigned int fd, fmode_t mask, unsigned int refs)
 {
-       struct file *file;
+       for (;;) {
+               struct file *file;
+               struct fdtable *fdt = rcu_dereference_raw(files->fdt);
+               struct file __rcu **fdentry;
 
-       rcu_read_lock();
-loop:
-       file = files_lookup_fd_rcu(files, fd);
-       if (file) {
-               /* File object ref couldn't be taken.
-                * dup2() atomicity guarantee is the reason
-                * we loop to catch the new file (or NULL pointer)
+               if (unlikely(fd >= fdt->max_fds))
+                       return NULL;
+
+               fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
+               file = rcu_dereference_raw(*fdentry);
+               if (unlikely(!file))
+                       return NULL;
+
+               if (unlikely(file->f_mode & mask))
+                       return NULL;
+
+               /*
+                * Ok, we have a file pointer. However, because we do
+                * this all locklessly under RCU, we may be racing with
+                * that file being closed.
+                *
+                * Such a race can take two forms:
+                *
+                *  (a) the file ref already went down to zero,
+                *      and get_file_rcu_many() fails. Just try
+                *      again:
                 */
-               if (file->f_mode & mask)
-                       file = NULL;
-               else if (!get_file_rcu_many(file, refs))
-                       goto loop;
-               else if (files_lookup_fd_raw(files, fd) != file) {
+               if (unlikely(!get_file_rcu_many(file, refs)))
+                       continue;
+
+               /*
+                *  (b) the file table entry has changed under us.
+                *       Note that we don't need to re-check the 'fdt->fd'
+                *       pointer having changed, because it always goes
+                *       hand-in-hand with 'fdt'.
+                *
+                * If so, we need to put our refs and try again.
+                */
+               if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
+                   unlikely(rcu_dereference_raw(*fdentry) != file)) {
                        fput_many(file, refs);
-                       goto loop;
+                       continue;
                }
+
+               /*
+                * Ok, we have a ref to the file, and checked that it
+                * still exists.
+                */
+               return file;
        }
+}
+
+static struct file *__fget_files(struct files_struct *files, unsigned int fd,
+                                fmode_t mask, unsigned int refs)
+{
+       struct file *file;
+
+       rcu_read_lock();
+       file = __fget_files_rcu(files, fd, mask, refs);
        rcu_read_unlock();
 
        return file;
index 50cf9f9..5c4f582 100644 (file)
@@ -142,6 +142,7 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
                                        struct io_wqe_acct *acct,
                                        struct io_cb_cancel_data *match);
 static void create_worker_cb(struct callback_head *cb);
+static void io_wq_cancel_tw_create(struct io_wq *wq);
 
 static bool io_worker_get(struct io_worker *worker)
 {
@@ -357,12 +358,22 @@ static bool io_queue_worker_create(struct io_worker *worker,
            test_and_set_bit_lock(0, &worker->create_state))
                goto fail_release;
 
+       atomic_inc(&wq->worker_refs);
        init_task_work(&worker->create_work, func);
        worker->create_index = acct->index;
        if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
-               clear_bit_unlock(0, &worker->create_state);
+               /*
+                * EXIT may have been set after checking it above, check after
+                * adding the task_work and remove any creation item if it is
+                * now set. wq exit does that too, but we can have added this
+                * work item after we canceled in io_wq_exit_workers().
+                */
+               if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+                       io_wq_cancel_tw_create(wq);
+               io_worker_ref_put(wq);
                return true;
        }
+       io_worker_ref_put(wq);
        clear_bit_unlock(0, &worker->create_state);
 fail_release:
        io_worker_release(worker);
@@ -384,7 +395,9 @@ static void io_wqe_dec_running(struct io_worker *worker)
        if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
                atomic_inc(&acct->nr_running);
                atomic_inc(&wqe->wq->worker_refs);
+               raw_spin_unlock(&wqe->lock);
                io_queue_worker_create(worker, acct, create_worker_cb);
+               raw_spin_lock(&wqe->lock);
        }
 }
 
@@ -1198,13 +1211,9 @@ void io_wq_exit_start(struct io_wq *wq)
        set_bit(IO_WQ_BIT_EXIT, &wq->state);
 }
 
-static void io_wq_exit_workers(struct io_wq *wq)
+static void io_wq_cancel_tw_create(struct io_wq *wq)
 {
        struct callback_head *cb;
-       int node;
-
-       if (!wq->task)
-               return;
 
        while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
                struct io_worker *worker;
@@ -1212,6 +1221,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
                worker = container_of(cb, struct io_worker, create_work);
                io_worker_cancel_cb(worker);
        }
+}
+
+static void io_wq_exit_workers(struct io_wq *wq)
+{
+       int node;
+
+       if (!wq->task)
+               return;
+
+       io_wq_cancel_tw_create(wq);
 
        rcu_read_lock();
        for_each_node(node) {
index c4f2176..fb2a0cb 100644 (file)
@@ -2891,9 +2891,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
 
        kiocb->ki_pos = READ_ONCE(sqe->off);
-       if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
-               req->flags |= REQ_F_CUR_POS;
-               kiocb->ki_pos = file->f_pos;
+       if (kiocb->ki_pos == -1) {
+               if (!(file->f_mode & FMODE_STREAM)) {
+                       req->flags |= REQ_F_CUR_POS;
+                       kiocb->ki_pos = file->f_pos;
+               } else {
+                       kiocb->ki_pos = 0;
+               }
        }
        kiocb->ki_flags = iocb_flags(file);
        ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
@@ -9824,7 +9828,7 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
 
 /*
  * Find any io_uring ctx that this task has registered or done IO on, and cancel
- * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation.
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
  */
 static __cold void io_uring_cancel_generic(bool cancel_all,
                                           struct io_sq_data *sqd)
@@ -9866,8 +9870,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
                                                             cancel_all);
                }
 
-               prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+               prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+               io_run_task_work();
                io_uring_drop_tctx_refs(current);
+
                /*
                 * If we've seen completions, retry without waiting. This
                 * avoids a race where a completion comes in before we did
index 8317f7c..5052be9 100644 (file)
@@ -148,7 +148,7 @@ static int ndr_read_int16(struct ndr *n, __u16 *value)
 static int ndr_read_int32(struct ndr *n, __u32 *value)
 {
        if (n->offset + sizeof(__u32) > n->length)
-               return 0;
+               return -EINVAL;
 
        if (value)
                *value = le32_to_cpu(*(__le32 *)ndr_get_field(n));
index 0a5d845..02a44d2 100644 (file)
@@ -271,9 +271,6 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
        if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
                conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
 
-       if (conn->cipher_type)
-               conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
-
        if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
                conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
 
index 49c9da3..b8b3a4c 100644 (file)
@@ -915,6 +915,25 @@ static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
        }
 }
 
+/**
+ * smb3_encryption_negotiated() - checks if server and client agreed on enabling encryption
+ * @conn:      smb connection
+ *
+ * Return:     true if connection should be encrypted, else false
+ */
+static bool smb3_encryption_negotiated(struct ksmbd_conn *conn)
+{
+       if (!conn->ops->generate_encryptionkey)
+               return false;
+
+       /*
+        * SMB 3.0 and 3.0.2 dialects use the SMB2_GLOBAL_CAP_ENCRYPTION flag.
+        * SMB 3.1.1 uses the cipher_type field.
+        */
+       return (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) ||
+           conn->cipher_type;
+}
+
 static void decode_compress_ctxt(struct ksmbd_conn *conn,
                                 struct smb2_compression_capabilities_context *pneg_ctxt)
 {
@@ -1469,8 +1488,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
                    (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
                        sess->sign = true;
 
-               if (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION &&
-                   conn->ops->generate_encryptionkey &&
+               if (smb3_encryption_negotiated(conn) &&
                    !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
                        rc = conn->ops->generate_encryptionkey(sess);
                        if (rc) {
@@ -1559,8 +1577,7 @@ static int krb5_authenticate(struct ksmbd_work *work)
            (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
                sess->sign = true;
 
-       if ((conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) &&
-           conn->ops->generate_encryptionkey) {
+       if (smb3_encryption_negotiated(conn)) {
                retval = conn->ops->generate_encryptionkey(sess);
                if (retval) {
                        ksmbd_debug(SMB,
@@ -2962,6 +2979,10 @@ int smb2_open(struct ksmbd_work *work)
                                                            &pntsd_size, &fattr);
                                        posix_acl_release(fattr.cf_acls);
                                        posix_acl_release(fattr.cf_dacls);
+                                       if (rc) {
+                                               kfree(pntsd);
+                                               goto err_out;
+                                       }
 
                                        rc = ksmbd_vfs_set_sd_xattr(conn,
                                                                    user_ns,
index 659a8f3..b696543 100644 (file)
@@ -4263,12 +4263,11 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
                return err;
 
        err = user_path_at(dfd, path, kattr.lookup_flags, &target);
-       if (err)
-               return err;
-
-       err = do_mount_setattr(&target, &kattr);
+       if (!err) {
+               err = do_mount_setattr(&target, &kattr);
+               path_put(&target);
+       }
        finish_mount_kattr(&kattr);
-       path_put(&target);
        return err;
 }
 
index 7046f9b..75c76cb 100644 (file)
@@ -354,16 +354,11 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work)
        netfs_rreq_do_write_to_cache(rreq);
 }
 
-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
-                                     bool was_async)
+static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
 {
-       if (was_async) {
-               rreq->work.func = netfs_rreq_write_to_cache_work;
-               if (!queue_work(system_unbound_wq, &rreq->work))
-                       BUG();
-       } else {
-               netfs_rreq_do_write_to_cache(rreq);
-       }
+       rreq->work.func = netfs_rreq_write_to_cache_work;
+       if (!queue_work(system_unbound_wq, &rreq->work))
+               BUG();
 }
 
 /*
@@ -558,7 +553,7 @@ again:
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
        if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
-               return netfs_rreq_write_to_cache(rreq, was_async);
+               return netfs_rreq_write_to_cache(rreq);
 
        netfs_rreq_completed(rreq, was_async);
 }
@@ -960,7 +955,7 @@ int netfs_readpage(struct file *file,
        rreq = netfs_alloc_read_request(ops, netfs_priv, file);
        if (!rreq) {
                if (netfs_priv)
-                       ops->cleanup(netfs_priv, folio_file_mapping(folio));
+                       ops->cleanup(folio_file_mapping(folio), netfs_priv);
                folio_unlock(folio);
                return -ENOMEM;
        }
@@ -1191,7 +1186,7 @@ have_folio:
                goto error;
 have_folio_no_wait:
        if (netfs_priv)
-               ops->cleanup(netfs_priv, mapping);
+               ops->cleanup(mapping, netfs_priv);
        *_folio = folio;
        _leave(" = 0");
        return 0;
@@ -1202,7 +1197,7 @@ error:
        folio_unlock(folio);
        folio_put(folio);
        if (netfs_priv)
-               ops->cleanup(netfs_priv, mapping);
+               ops->cleanup(mapping, netfs_priv);
        _leave(" = %d", ret);
        return ret;
 }
index 4418517..15dac36 100644 (file)
@@ -438,22 +438,19 @@ nfsd3_proc_link(struct svc_rqst *rqstp)
 
 static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
                                     struct nfsd3_readdirres *resp,
-                                    int count)
+                                    u32 count)
 {
        struct xdr_buf *buf = &resp->dirlist;
        struct xdr_stream *xdr = &resp->xdr;
 
-       count = min_t(u32, count, svc_max_payload(rqstp));
+       count = clamp(count, (u32)(XDR_UNIT * 2), svc_max_payload(rqstp));
 
        memset(buf, 0, sizeof(*buf));
 
        /* Reserve room for the NULL ptr & eof flag (-2 words) */
        buf->buflen = count - XDR_UNIT * 2;
        buf->pages = rqstp->rq_next_page;
-       while (count > 0) {
-               rqstp->rq_next_page++;
-               count -= PAGE_SIZE;
-       }
+       rqstp->rq_next_page += (buf->buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        /* This is xdr_init_encode(), but it assumes that
         * the head kvec has already been consumed. */
@@ -462,7 +459,7 @@ static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
        xdr->page_ptr = buf->pages;
        xdr->iov = NULL;
        xdr->p = page_address(*buf->pages);
-       xdr->end = xdr->p + (PAGE_SIZE >> 2);
+       xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
        xdr->rqst = NULL;
 }
 
index 6fedc49..c634483 100644 (file)
@@ -2156,6 +2156,7 @@ static struct notifier_block nfsd4_cld_block = {
 int
 register_cld_notifier(void)
 {
+       WARN_ON(!nfsd_net_id);
        return rpc_pipefs_notifier_register(&nfsd4_cld_block);
 }
 
index bfad94c..1956d37 100644 (file)
@@ -1207,6 +1207,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
        return 0;
 }
 
+static bool delegation_hashed(struct nfs4_delegation *dp)
+{
+       return !(list_empty(&dp->dl_perfile));
+}
+
 static bool
 unhash_delegation_locked(struct nfs4_delegation *dp)
 {
@@ -1214,7 +1219,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
 
        lockdep_assert_held(&state_lock);
 
-       if (list_empty(&dp->dl_perfile))
+       if (!delegation_hashed(dp))
                return false;
 
        dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
@@ -4598,7 +4603,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
         * queued for a lease break. Don't queue it again.
         */
        spin_lock(&state_lock);
-       if (dp->dl_time == 0) {
+       if (delegation_hashed(dp) && dp->dl_time == 0) {
                dp->dl_time = ktime_get_boottime_seconds();
                list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
        }
index af8531c..51a49e0 100644 (file)
@@ -1521,12 +1521,9 @@ static int __init init_nfsd(void)
        int retval;
        printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
 
-       retval = register_cld_notifier();
-       if (retval)
-               return retval;
        retval = nfsd4_init_slabs();
        if (retval)
-               goto out_unregister_notifier;
+               return retval;
        retval = nfsd4_init_pnfs();
        if (retval)
                goto out_free_slabs;
@@ -1545,9 +1542,14 @@ static int __init init_nfsd(void)
                goto out_free_exports;
        retval = register_pernet_subsys(&nfsd_net_ops);
        if (retval < 0)
+               goto out_free_filesystem;
+       retval = register_cld_notifier();
+       if (retval)
                goto out_free_all;
        return 0;
 out_free_all:
+       unregister_pernet_subsys(&nfsd_net_ops);
+out_free_filesystem:
        unregister_filesystem(&nfsd_fs_type);
 out_free_exports:
        remove_proc_entry("fs/nfs/exports", NULL);
@@ -1561,13 +1563,12 @@ out_free_pnfs:
        nfsd4_exit_pnfs();
 out_free_slabs:
        nfsd4_free_slabs();
-out_unregister_notifier:
-       unregister_cld_notifier();
        return retval;
 }
 
 static void __exit exit_nfsd(void)
 {
+       unregister_cld_notifier();
        unregister_pernet_subsys(&nfsd_net_ops);
        nfsd_drc_slab_free();
        remove_proc_entry("fs/nfs/exports", NULL);
@@ -1577,7 +1578,6 @@ static void __exit exit_nfsd(void)
        nfsd4_free_slabs();
        nfsd4_exit_pnfs();
        unregister_filesystem(&nfsd_fs_type);
-       unregister_cld_notifier();
 }
 
 MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
index eea5b59..de282f3 100644 (file)
@@ -556,17 +556,17 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp)
 
 static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
                                    struct nfsd_readdirres *resp,
-                                   int count)
+                                   u32 count)
 {
        struct xdr_buf *buf = &resp->dirlist;
        struct xdr_stream *xdr = &resp->xdr;
 
-       count = min_t(u32, count, PAGE_SIZE);
+       count = clamp(count, (u32)(XDR_UNIT * 2), svc_max_payload(rqstp));
 
        memset(buf, 0, sizeof(*buf));
 
        /* Reserve room for the NULL ptr & eof flag (-2 words) */
-       buf->buflen = count - sizeof(__be32) * 2;
+       buf->buflen = count - XDR_UNIT * 2;
        buf->pages = rqstp->rq_next_page;
        rqstp->rq_next_page++;
 
@@ -577,7 +577,7 @@ static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
        xdr->page_ptr = buf->pages;
        xdr->iov = NULL;
        xdr->p = page_address(*buf->pages);
-       xdr->end = xdr->p + (PAGE_SIZE >> 2);
+       xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
        xdr->rqst = NULL;
 }
 
index 040e1cf..65ce0e7 100644 (file)
 
 void signalfd_cleanup(struct sighand_struct *sighand)
 {
-       wait_queue_head_t *wqh = &sighand->signalfd_wqh;
-       /*
-        * The lockless check can race with remove_wait_queue() in progress,
-        * but in this case its caller should run under rcu_read_lock() and
-        * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
-        */
-       if (likely(!waitqueue_active(wqh)))
-               return;
-
-       /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
-       wake_up_poll(wqh, EPOLLHUP | POLLFREE);
+       wake_up_pollfree(&sighand->signalfd_wqh);
 }
 
 struct signalfd_ctx {
index 85ba15a..043e4cb 100644 (file)
@@ -72,16 +72,3 @@ void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int l
        ctx->y = y;
 }
 EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
-
-static int __init
-init_smbfs_common(void)
-{
-       return 0;
-}
-static void __init
-exit_smbfs_common(void)
-{
-}
-
-module_init(init_smbfs_common)
-module_exit(exit_smbfs_common)
index 925a621..3616839 100644 (file)
@@ -161,6 +161,77 @@ struct tracefs_fs_info {
        struct tracefs_mount_opts mount_opts;
 };
 
+static void change_gid(struct dentry *dentry, kgid_t gid)
+{
+       if (!dentry->d_inode)
+               return;
+       dentry->d_inode->i_gid = gid;
+}
+
+/*
+ * Taken from d_walk, but without he need for handling renames.
+ * Nothing can be renamed while walking the list, as tracefs
+ * does not support renames. This is only called when mounting
+ * or remounting the file system, to set all the files to
+ * the given gid.
+ */
+static void set_gid(struct dentry *parent, kgid_t gid)
+{
+       struct dentry *this_parent;
+       struct list_head *next;
+
+       this_parent = parent;
+       spin_lock(&this_parent->d_lock);
+
+       change_gid(this_parent, gid);
+repeat:
+       next = this_parent->d_subdirs.next;
+resume:
+       while (next != &this_parent->d_subdirs) {
+               struct list_head *tmp = next;
+               struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+               next = tmp->next;
+
+               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
+               change_gid(dentry, gid);
+
+               if (!list_empty(&dentry->d_subdirs)) {
+                       spin_unlock(&this_parent->d_lock);
+                       spin_release(&dentry->d_lock.dep_map, _RET_IP_);
+                       this_parent = dentry;
+                       spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
+                       goto repeat;
+               }
+               spin_unlock(&dentry->d_lock);
+       }
+       /*
+        * All done at this level ... ascend and resume the search.
+        */
+       rcu_read_lock();
+ascend:
+       if (this_parent != parent) {
+               struct dentry *child = this_parent;
+               this_parent = child->d_parent;
+
+               spin_unlock(&child->d_lock);
+               spin_lock(&this_parent->d_lock);
+
+               /* go into the first sibling still alive */
+               do {
+                       next = child->d_child.next;
+                       if (next == &this_parent->d_subdirs)
+                               goto ascend;
+                       child = list_entry(next, struct dentry, d_child);
+               } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
+               rcu_read_unlock();
+               goto resume;
+       }
+       rcu_read_unlock();
+       spin_unlock(&this_parent->d_lock);
+       return;
+}
+
 static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
 {
        substring_t args[MAX_OPT_ARGS];
@@ -193,6 +264,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
                        if (!gid_valid(gid))
                                return -EINVAL;
                        opts->gid = gid;
+                       set_gid(tracefs_mount->mnt_root, gid);
                        break;
                case Opt_mode:
                        if (match_octal(&args[0], &option))
@@ -414,6 +486,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
        inode->i_mode = mode;
        inode->i_fop = fops ? fops : &tracefs_file_operations;
        inode->i_private = data;
+       inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+       inode->i_gid = d_inode(dentry->d_parent)->i_gid;
        d_instantiate(dentry, inode);
        fsnotify_create(dentry->d_parent->d_inode, dentry);
        return end_creating(dentry);
@@ -436,6 +510,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
        inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
        inode->i_op = ops;
        inode->i_fop = &simple_dir_operations;
+       inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+       inode->i_gid = d_inode(dentry->d_parent)->i_gid;
 
        /* directory inodes start off with i_nlink == 2 (for "." entry) */
        inc_nlink(inode);
index e21459f..778b57b 100644 (file)
@@ -1765,7 +1765,10 @@ static int
 xfs_remount_ro(
        struct xfs_mount        *mp)
 {
-       int error;
+       struct xfs_icwalk       icw = {
+               .icw_flags      = XFS_ICWALK_FLAG_SYNC,
+       };
+       int                     error;
 
        /*
         * Cancel background eofb scanning so it cannot race with the final
@@ -1773,8 +1776,13 @@ xfs_remount_ro(
         */
        xfs_blockgc_stop(mp);
 
-       /* Get rid of any leftover CoW reservations... */
-       error = xfs_blockgc_free_space(mp, NULL);
+       /*
+        * Clear out all remaining COW staging extents and speculative post-EOF
+        * preallocations so that we don't leave inodes requiring inactivation
+        * cleanups during reclaim on a read-only mount.  We must process every
+        * cached inode, so this requires a synchronous cache scan.
+        */
+       error = xfs_blockgc_free_space(mp, &icw);
        if (error) {
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                return error;
index 259ee2b..b76dfb3 100644 (file)
@@ -1787,5 +1787,6 @@ static void __exit zonefs_exit(void)
 MODULE_AUTHOR("Damien Le Moal");
 MODULE_DESCRIPTION("Zone file system for zoned block devices");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_FS("zonefs");
 module_init(zonefs_init);
 module_exit(zonefs_exit);
index e7a163a..755f38e 100644 (file)
@@ -732,6 +732,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
 struct bpf_trampoline *bpf_trampoline_get(u64 key,
                                          struct bpf_attach_target_info *tgt_info);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
+int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
 #define BPF_DISPATCHER_INIT(_name) {                           \
        .mutex = __MUTEX_INITIALIZER(_name.mutex),              \
        .func = &_name##_func,                                  \
@@ -1352,28 +1353,16 @@ extern struct mutex bpf_stats_enabled_mutex;
  * kprobes, tracepoints) to prevent deadlocks on map operations as any of
  * these events can happen inside a region which holds a map bucket lock
  * and can deadlock on it.
- *
- * Use the preemption safe inc/dec variants on RT because migrate disable
- * is preemptible on RT and preemption in the middle of the RMW operation
- * might lead to inconsistent state. Use the raw variants for non RT
- * kernels as migrate_disable() maps to preempt_disable() so the slightly
- * more expensive save operation can be avoided.
  */
 static inline void bpf_disable_instrumentation(void)
 {
        migrate_disable();
-       if (IS_ENABLED(CONFIG_PREEMPT_RT))
-               this_cpu_inc(bpf_prog_active);
-       else
-               __this_cpu_inc(bpf_prog_active);
+       this_cpu_inc(bpf_prog_active);
 }
 
 static inline void bpf_enable_instrumentation(void)
 {
-       if (IS_ENABLED(CONFIG_PREEMPT_RT))
-               this_cpu_dec(bpf_prog_active);
-       else
-               __this_cpu_dec(bpf_prog_active);
+       this_cpu_dec(bpf_prog_active);
        migrate_enable();
 }
 
index 203eef9..0e1b628 100644 (file)
@@ -245,7 +245,10 @@ struct kfunc_btf_id_set {
        struct module *owner;
 };
 
-struct kfunc_btf_id_list;
+struct kfunc_btf_id_list {
+       struct list_head list;
+       struct mutex mutex;
+};
 
 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
 void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
@@ -254,6 +257,9 @@ void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
                                 struct kfunc_btf_id_set *s);
 bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
                              struct module *owner);
+
+extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
+extern struct kfunc_btf_id_list prog_test_kfunc_list;
 #else
 static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
                                             struct kfunc_btf_id_set *s)
@@ -268,13 +274,13 @@ static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist,
 {
        return false;
 }
+
+static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused;
+static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused;
 #endif
 
 #define DEFINE_KFUNC_BTF_ID_SET(set, name)                                     \
        struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set),     \
                                         THIS_MODULE }
 
-extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
-extern struct kfunc_btf_id_list prog_test_kfunc_list;
-
 #endif
index 2f909ed..4ff37cb 100644 (file)
@@ -3,7 +3,6 @@
 #define _LINUX_CACHEINFO_H
 
 #include <linux/bitops.h>
-#include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/smp.h>
 
index 3d5af56..429dceb 100644 (file)
@@ -121,7 +121,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
        asm volatile(__stringify_label(c) ":\n\t"                       \
                     ".pushsection .discard.reachable\n\t"              \
                     ".long " __stringify_label(c) "b - .\n\t"          \
-                    ".popsection\n\t");                                \
+                    ".popsection\n\t" : : "i" (c));                    \
 })
 #define annotate_reachable() __annotate_reachable(__COUNTER__)
 
@@ -129,7 +129,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
        asm volatile(__stringify_label(c) ":\n\t"                       \
                     ".pushsection .discard.unreachable\n\t"            \
                     ".long " __stringify_label(c) "b - .\n\t"          \
-                    ".popsection\n\t");                                \
+                    ".popsection\n\t" : : "i" (c));                    \
 })
 #define annotate_unreachable() __annotate_unreachable(__COUNTER__)
 
index 8eacf67..039e7e0 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 #include <linux/math.h>
+#include <linux/sched.h>
 
 extern unsigned long loops_per_jiffy;
 
@@ -58,7 +59,18 @@ void calibrate_delay(void);
 void __attribute__((weak)) calibration_delay_done(void);
 void msleep(unsigned int msecs);
 unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+                       unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+       usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+       usleep_range_state(min, max, TASK_IDLE);
+}
 
 static inline void ssleep(unsigned int seconds)
 {
index a498ebc..15e7c5e 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/klist.h>
 #include <linux/pm.h>
 #include <linux/device/bus.h>
+#include <linux/module.h>
 
 /**
  * enum probe_type - device driver probe type to try
index dbd39b2..ef8dbc0 100644 (file)
@@ -1283,4 +1283,10 @@ static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
 }
 #endif
 
+#ifdef CONFIG_SYSFB
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+#else
+static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) { }
+#endif
+
 #endif /* _LINUX_EFI_H */
index 6f3db99..3da9584 100644 (file)
@@ -610,6 +610,7 @@ extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
                                               const char *name);
 extern int remove_conflicting_framebuffers(struct apertures_struct *a,
                                           const char *name, bool primary);
+extern bool is_firmware_framebuffer(struct apertures_struct *a);
 extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
 extern int fb_show_logo(struct fb_info *fb_info, int rotate);
 extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
index 24b7ed2..7f1e88e 100644 (file)
@@ -6,6 +6,7 @@
 #define __LINUX_FILTER_H__
 
 #include <linux/atomic.h>
+#include <linux/bpf.h>
 #include <linux/refcount.h>
 #include <linux/compat.h>
 #include <linux/skbuff.h>
@@ -26,7 +27,6 @@
 
 #include <asm/byteorder.h>
 #include <uapi/linux/filter.h>
-#include <uapi/linux/bpf.h>
 
 struct sk_buff;
 struct sock;
@@ -640,9 +640,6 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void
  * This uses migrate_disable/enable() explicitly to document that the
  * invocation of a BPF program does not require reentrancy protection
  * against a BPF program which is invoked from a preempting task.
- *
- * For non RT enabled kernels migrate_disable/enable() maps to
- * preempt_disable/enable(), i.e. it disables also preemption.
  */
 static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
                                          const void *ctx)
index b976c41..8fcc384 100644 (file)
@@ -624,7 +624,7 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
 void free_pages_exact(void *virt, size_t size);
-__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(1);
+__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
 
 #define __get_free_page(gfp_mask) \
                __get_free_pages((gfp_mask), 0)
index 9e067f9..f453be3 100644 (file)
@@ -840,6 +840,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
        return hdev->ll_driver == driver;
 }
 
+static inline bool hid_is_usb(struct hid_device *hdev)
+{
+       return hid_is_using_ll_driver(hdev, &usb_hid_driver);
+}
+
 #define        PM_HINT_FULLON  1<<5
 #define PM_HINT_NORMAL 1<<1
 
index fa2cd8c..24359b4 100644 (file)
@@ -11,7 +11,7 @@
        asm volatile(__stringify(c) ": nop\n\t"                         \
                     ".pushsection .discard.instr_begin\n\t"            \
                     ".long " __stringify(c) "b - .\n\t"                \
-                    ".popsection\n\t");                                \
+                    ".popsection\n\t" : : "i" (c));                    \
 })
 #define instrumentation_begin() __instrumentation_begin(__COUNTER__)
 
@@ -50,7 +50,7 @@
        asm volatile(__stringify(c) ": nop\n\t"                         \
                     ".pushsection .discard.instr_end\n\t"              \
                     ".long " __stringify(c) "b - .\n\t"                \
-                    ".popsection\n\t");                                \
+                    ".popsection\n\t" : : "i" (c));                    \
 })
 #define instrumentation_end() __instrumentation_end(__COUNTER__)
 #else
index 1f22a30..9367f1c 100644 (file)
@@ -329,7 +329,46 @@ extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+                                    bool setaffinity);
+
+/**
+ * irq_update_affinity_hint - Update the affinity hint
+ * @irq:       Interrupt to update
+ * @m:         cpumask pointer (NULL to clear the hint)
+ *
+ * Updates the affinity hint, but does not change the affinity of the interrupt.
+ */
+static inline int
+irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+       return __irq_apply_affinity_hint(irq, m, false);
+}
+
+/**
+ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
+ *                          cpumask to the interrupt
+ * @irq:       Interrupt to update
+ * @m:         cpumask pointer (NULL to clear the hint)
+ *
+ * Updates the affinity hint and if @m is not NULL it applies it as the
+ * affinity of that interrupt.
+ */
+static inline int
+irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
+{
+       return __irq_apply_affinity_hint(irq, m, true);
+}
+
+/*
+ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
+ * instead.
+ */
+static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+       return irq_set_affinity_and_hint(irq, m);
+}
+
 extern int irq_update_affinity_desc(unsigned int irq,
                                    struct irq_affinity_desc *affinity);
 
@@ -361,6 +400,18 @@ static inline int irq_can_set_affinity(unsigned int irq)
 
 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
 
+static inline int irq_update_affinity_hint(unsigned int irq,
+                                          const struct cpumask *m)
+{
+       return -EINVAL;
+}
+
+static inline int irq_set_affinity_and_hint(unsigned int irq,
+                                           const struct cpumask *m)
+{
+       return -EINVAL;
+}
+
 static inline int irq_set_affinity_hint(unsigned int irq,
                                        const struct cpumask *m)
 {
index 20c1f96..a59d25f 100644 (file)
@@ -133,6 +133,7 @@ struct inet6_skb_parm {
        __u16                   dsthao;
 #endif
        __u16                   frag_max_size;
+       __u16                   srhoff;
 
 #define IP6SKB_XFRM_TRANSFORMED        1
 #define IP6SKB_FORWARDED       2
@@ -142,6 +143,7 @@ struct inet6_skb_parm {
 #define IP6SKB_HOPBYHOP        32
 #define IP6SKB_L3SLAVE         64
 #define IP6SKB_JUMBOGRAM      128
+#define IP6SKB_SEG6          256
 };
 
 #if defined(CONFIG_NET_L3_MASTER_DEV)
index 553da48..d476405 100644 (file)
@@ -131,7 +131,7 @@ struct irq_domain_ops {
 #endif
 };
 
-extern struct irq_domain_ops irq_generic_chip_ops;
+extern const struct irq_domain_ops irq_generic_chip_ops;
 
 struct irq_domain_chip_generic;
 
index 8adcf1f..9dc7cb2 100644 (file)
@@ -405,8 +405,8 @@ phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
                                      phys_addr_t end, int nid, bool exact_nid);
 phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
 
-static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
-                                             phys_addr_t align)
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+                                                      phys_addr_t align)
 {
        return memblock_phys_alloc_range(size, align, 0,
                                         MEMBLOCK_ALLOC_ACCESSIBLE);
index 7239858..a5cc4cd 100644 (file)
@@ -663,6 +663,19 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
  */
 int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
 
+/**
+ * mhi_pm_resume_force - Force resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ *
+ * Resume the device irrespective of its MHI state. As per the MHI spec, devices
+ * has to be in M3 state during resume. But some devices seem to be in a
+ * different MHI state other than M3 but they continue working fine if allowed.
+ * This API is intented to be used for such devices.
+ *
+ * Return: 0 if the resume succeeds, a negative error code otherwise
+ */
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
+
 /**
  * mhi_download_rddm_image - Download ramdump image from device for
  *                           debugging purpose.
index 58e744b..936dc0b 100644 (file)
@@ -277,6 +277,7 @@ enum vmscan_throttle_state {
        VMSCAN_THROTTLE_WRITEBACK,
        VMSCAN_THROTTLE_ISOLATED,
        VMSCAN_THROTTLE_NOPROGRESS,
+       VMSCAN_THROTTLE_CONGESTED,
        NR_VMSCAN_THROTTLE,
 };
 
index be5cb33..6aadcc0 100644 (file)
@@ -1937,7 +1937,7 @@ enum netdev_ml_priv_type {
  *     @udp_tunnel_nic:        UDP tunnel offload state
  *     @xdp_state:             stores info on attached XDP BPF programs
  *
- *     @nested_level:  Used as as a parameter of spin_lock_nested() of
+ *     @nested_level:  Used as a parameter of spin_lock_nested() of
  *                     dev->addr_list_lock.
  *     @unlink_list:   As netif_addr_lock() can be called recursively,
  *                     keep a list of interfaces to be deleted.
index 6052464..d150a90 100644 (file)
@@ -285,7 +285,6 @@ static inline struct inode *folio_inode(struct folio *folio)
 
 static inline bool page_cache_add_speculative(struct page *page, int count)
 {
-       VM_BUG_ON_PAGE(PageTail(page), page);
        return folio_ref_try_add_rcu((struct folio *)page, count);
 }
 
index b31d3f3..d73a1c0 100644 (file)
@@ -51,9 +51,9 @@
 #define _LINUX_PERCPU_REFCOUNT_H
 
 #include <linux/atomic.h>
-#include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
+#include <linux/types.h>
 #include <linux/gfp.h>
 
 struct percpu_ref;
index 96e43fb..cbf03a5 100644 (file)
@@ -538,11 +538,12 @@ struct macsec_ops;
  * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY
  * @state: State of the PHY for management purposes
  * @dev_flags: Device-specific flags used by the PHY driver.
- *             Bits [15:0] are free to use by the PHY driver to communicate
- *                         driver specific behavior.
- *             Bits [23:16] are currently reserved for future use.
- *             Bits [31:24] are reserved for defining generic
- *                          PHY driver behavior.
+ *
+ *      - Bits [15:0] are free to use by the PHY driver to communicate
+ *        driver specific behavior.
+ *      - Bits [23:16] are currently reserved for future use.
+ *      - Bits [31:24] are reserved for defining generic
+ *        PHY driver behavior.
  * @irq: IRQ number of the PHY's interrupt (-1 if none)
  * @phy_timer: The timer for handling the state machine
  * @phylink: Pointer to phylink instance for this PHY
index 222da43..eddd66d 100644 (file)
@@ -129,7 +129,7 @@ static inline bool pm_runtime_suspended(struct device *dev)
  * pm_runtime_active - Check whether or not a device is runtime-active.
  * @dev: Target device.
  *
- * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
  * %RPM_ACTIVE, or %false otherwise.
  *
  * Note that the return value of this function can only be trusted if it is
index bd7a73d..54cf566 100644 (file)
@@ -499,7 +499,8 @@ struct regulator_irq_data {
  *             best to shut-down regulator(s) or reboot the SOC if error
  *             handling is repeatedly failing. If fatal_cnt is given the IRQ
  *             handling is aborted if it fails for fatal_cnt times and die()
- *             callback (if populated) or BUG() is called to try to prevent
+ *             callback (if populated) is called. If die() is not populated
+ *             poweroff for the system is attempted in order to prevent any
  *             further damage.
  * @reread_ms: The time which is waited before attempting to re-read status
  *             at the worker if IC reading fails. Immediate re-read is done
@@ -516,11 +517,12 @@ struct regulator_irq_data {
  * @data:      Driver private data pointer which will be passed as such to
  *             the renable, map_event and die callbacks in regulator_irq_data.
  * @die:       Protection callback. If IC status reading or recovery actions
- *             fail fatal_cnt times this callback or BUG() is called. This
- *             callback should implement a final protection attempt like
- *             disabling the regulator. If protection succeeded this may
- *             return 0. If anything else is returned the core assumes final
- *             protection failed and calls BUG() as a last resort.
+ *             fail fatal_cnt times this callback is called or system is
+ *             powered off. This callback should implement a final protection
+ *             attempt like disabling the regulator. If protection succeeded
+ *             die() may return 0. If anything else is returned the core
+ *             assumes final protection failed and attempts to perform a
+ *             poweroff as a last resort.
  * @map_event: Driver callback to map IRQ status into regulator devices with
  *             events / errors. NOTE: callback MUST initialize both the
  *             errors and notifs for all rdevs which it signals having
index c8cb7e6..4507d77 100644 (file)
@@ -286,6 +286,7 @@ struct nf_bridge_info {
 struct tc_skb_ext {
        __u32 chain;
        __u16 mru;
+       __u16 zone;
        bool post_ct;
 };
 #endif
@@ -1380,7 +1381,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
                    struct flow_dissector *flow_dissector,
                    void *target_container,
                    u16 *ctinfo_map, size_t mapsize,
-                   bool post_ct);
+                   bool post_ct, u16 zone);
 void
 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
                             struct flow_dissector *flow_dissector,
index a1f0346..cf59996 100644 (file)
@@ -195,7 +195,7 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
  * @offset:    offset of buffer in user space
  * @pages:     locked pages from userspace
  * @num_pages: number of locked pages
- * @dmabuf:    dmabuf used to for exporting to user space
+ * @refcount:  reference counter
  * @flags:     defined by TEE_SHM_* in tee_drv.h
  * @id:                unique id of a shared memory object on this device, shared
  *             with user space
@@ -214,7 +214,7 @@ struct tee_shm {
        unsigned int offset;
        struct page **pages;
        size_t num_pages;
-       struct dma_buf *dmabuf;
+       refcount_t refcount;
        u32 flags;
        int id;
        u64 sec_world_id;
index 04e87f4..a960de6 100644 (file)
@@ -7,9 +7,27 @@
 #include <uapi/linux/udp.h>
 #include <uapi/linux/virtio_net.h>
 
+static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
+{
+       switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+       case VIRTIO_NET_HDR_GSO_TCPV4:
+               return protocol == cpu_to_be16(ETH_P_IP);
+       case VIRTIO_NET_HDR_GSO_TCPV6:
+               return protocol == cpu_to_be16(ETH_P_IPV6);
+       case VIRTIO_NET_HDR_GSO_UDP:
+               return protocol == cpu_to_be16(ETH_P_IP) ||
+                      protocol == cpu_to_be16(ETH_P_IPV6);
+       default:
+               return false;
+       }
+}
+
 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
                                           const struct virtio_net_hdr *hdr)
 {
+       if (skb->protocol)
+               return 0;
+
        switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
        case VIRTIO_NET_HDR_GSO_TCPV4:
        case VIRTIO_NET_HDR_GSO_UDP:
@@ -88,9 +106,12 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                        if (!skb->protocol) {
                                __be16 protocol = dev_parse_header_protocol(skb);
 
-                               virtio_net_hdr_set_proto(skb, hdr);
-                               if (protocol && protocol != skb->protocol)
+                               if (!protocol)
+                                       virtio_net_hdr_set_proto(skb, hdr);
+                               else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
                                        return -EINVAL;
+                               else
+                                       skb->protocol = protocol;
                        }
 retry:
                        if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
index 2d0df57..851e07d 100644 (file)
@@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
 
 #define wake_up(x)                     __wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)              __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 #define wake_up_interruptible_sync_poll_locked(x, m)                           \
        __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+       /*
+        * For performance reasons, we don't always take the queue lock here.
+        * Therefore, we might race with someone removing the last entry from
+        * the queue, and proceed while they still hold the queue lock.
+        * However, rcu_read_lock() is required to be held in such cases, so we
+        * can safely proceed with an RCU-delayed free.
+        */
+       if (waitqueue_active(wq_head))
+               __wake_up_pollfree(wq_head);
+}
+
 #define ___wait_cond_timeout(condition)                                                \
 ({                                                                             \
        bool __cond = (condition);                                              \
index f6af76c..191c36a 100644 (file)
@@ -126,7 +126,7 @@ struct tlb_slave_info {
 struct alb_bond_info {
        struct tlb_client_info  *tx_hashtbl; /* Dynamically allocated */
        u32                     unbalanced_load;
-       int                     tx_rebalance_counter;
+       atomic_t                tx_rebalance_counter;
        int                     lp_counter;
        /* -------- rlb parameters -------- */
        int rlb_enabled;
index 7994455..c4898fc 100644 (file)
@@ -136,6 +136,19 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
        sk_rx_queue_update(sk, skb);
 }
 
+/* Variant of sk_mark_napi_id() for passive flow setup,
+ * as sk->sk_napi_id and sk->sk_rx_queue_mapping content
+ * needs to be set.
+ */
+static inline void sk_mark_napi_id_set(struct sock *sk,
+                                      const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+#endif
+       sk_rx_queue_set(sk, skb);
+}
+
 static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
index cc663c6..d24b0a3 100644 (file)
@@ -276,14 +276,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
 /* jiffies until ct expires, 0 if already expired */
 static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
 {
-       s32 timeout = ct->timeout - nfct_time_stamp;
+       s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
 
        return timeout > 0 ? timeout : 0;
 }
 
 static inline bool nf_ct_is_expired(const struct nf_conn *ct)
 {
-       return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
+       return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
 }
 
 /* use after obtaining a reference count */
@@ -302,7 +302,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
 static inline void nf_ct_offload_timeout(struct nf_conn *ct)
 {
        if (nf_ct_expires(ct) < NF_CT_DAY / 2)
-               ct->timeout = nfct_time_stamp + NF_CT_DAY;
+               WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
 }
 
 struct kernel_param;
index bf79f3a..9e71691 100644 (file)
@@ -193,4 +193,20 @@ static inline void skb_txtime_consumed(struct sk_buff *skb)
        skb->tstamp = ktime_set(0, 0);
 }
 
+struct tc_skb_cb {
+       struct qdisc_skb_cb qdisc_cb;
+
+       u16 mru;
+       bool post_ct;
+       u16 zone; /* Only valid if post_ct = true */
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+       struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+       BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+       return cb;
+}
+
 #endif
index 22179b2..c70e6d2 100644 (file)
@@ -447,8 +447,6 @@ struct qdisc_skb_cb {
        };
 #define QDISC_CB_PRIV_LEN 20
        unsigned char           data[QDISC_CB_PRIV_LEN];
-       u16                     mru;
-       bool                    post_ct;
 };
 
 typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
index 189fdb9..3ae61ce 100644 (file)
@@ -105,19 +105,18 @@ extern struct percpu_counter sctp_sockets_allocated;
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
 void sctp_transport_walk_start(struct rhashtable_iter *iter);
 void sctp_transport_walk_stop(struct rhashtable_iter *iter);
 struct sctp_transport *sctp_transport_get_next(struct net *net,
                        struct rhashtable_iter *iter);
 struct sctp_transport *sctp_transport_get_idx(struct net *net,
                        struct rhashtable_iter *iter, int pos);
-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
-                                 struct net *net,
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
                                  const union sctp_addr *laddr,
                                  const union sctp_addr *paddr, void *p);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-                           int (*cb_done)(struct sctp_transport *, void *),
-                           struct net *net, int *pos, void *p);
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+                                   struct net *net, int *pos, void *p);
 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
                       struct sctp_info *info);
index 899c29c..8dabd88 100644 (file)
@@ -1355,6 +1355,7 @@ struct sctp_endpoint {
              reconf_enable:1;
 
        __u8  strreset_enable;
+       struct rcu_head rcu;
 };
 
 /* Recover the outter endpoint structure. */
@@ -1370,7 +1371,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
 struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
 void sctp_endpoint_free(struct sctp_endpoint *);
 void sctp_endpoint_put(struct sctp_endpoint *);
-void sctp_endpoint_hold(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
 void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
 struct sctp_association *sctp_endpoint_lookup_assoc(
        const struct sctp_endpoint *ep,
index 9d19c15..af668f1 100644 (file)
@@ -58,9 +58,30 @@ extern int seg6_local_init(void);
 extern void seg6_local_exit(void);
 
 extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
+extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags);
+extern void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt);
 extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
                             int proto);
 extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
 extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
                               u32 tbl_id);
+
+/* If the packet which invoked an ICMP error contains an SRH return
+ * the true destination address from within the SRH, otherwise use the
+ * destination address in the IP header.
+ */
+static inline const struct in6_addr *seg6_get_daddr(struct sk_buff *skb,
+                                                   struct inet6_skb_parm *opt)
+{
+       struct ipv6_sr_hdr *srh;
+
+       if (opt->flags & IP6SKB_SEG6) {
+               srh = (struct ipv6_sr_hdr *)(skb->data + opt->srhoff);
+               return  &srh->segments[0];
+       }
+
+       return NULL;
+}
+
+
 #endif
index bea21ff..d47e965 100644 (file)
@@ -431,7 +431,7 @@ struct sock {
 #ifdef CONFIG_XFRM
        struct xfrm_policy __rcu *sk_policy[2];
 #endif
-       struct dst_entry        *sk_rx_dst;
+       struct dst_entry __rcu  *sk_rx_dst;
        int                     sk_rx_dst_ifindex;
        u32                     sk_rx_dst_cookie;
 
index f25a614..ca2e900 100644 (file)
 #define _VMSCAN_THROTTLE_WRITEBACK     (1 << VMSCAN_THROTTLE_WRITEBACK)
 #define _VMSCAN_THROTTLE_ISOLATED      (1 << VMSCAN_THROTTLE_ISOLATED)
 #define _VMSCAN_THROTTLE_NOPROGRESS    (1 << VMSCAN_THROTTLE_NOPROGRESS)
+#define _VMSCAN_THROTTLE_CONGESTED     (1 << VMSCAN_THROTTLE_CONGESTED)
 
 #define show_throttle_flags(flags)                                             \
        (flags) ? __print_flags(flags, "|",                                     \
                {_VMSCAN_THROTTLE_WRITEBACK,    "VMSCAN_THROTTLE_WRITEBACK"},   \
                {_VMSCAN_THROTTLE_ISOLATED,     "VMSCAN_THROTTLE_ISOLATED"},    \
-               {_VMSCAN_THROTTLE_NOPROGRESS,   "VMSCAN_THROTTLE_NOPROGRESS"}   \
+               {_VMSCAN_THROTTLE_NOPROGRESS,   "VMSCAN_THROTTLE_NOPROGRESS"},  \
+               {_VMSCAN_THROTTLE_CONGESTED,    "VMSCAN_THROTTLE_CONGESTED"}    \
                ) : "VMSCAN_THROTTLE_NONE"
 
 
index 41b509f..f9c520c 100644 (file)
@@ -29,7 +29,7 @@
 #define POLLRDHUP       0x2000
 #endif
 
-#define POLLFREE       (__force __poll_t)0x4000        /* currently only for epoll */
+#define POLLFREE       (__force __poll_t)0x4000
 
 #define POLL_BUSY_LOOP (__force __poll_t)0x8000
 
index 2199adc..80aa5c4 100644 (file)
@@ -9,6 +9,7 @@
 #define __BIG_ENDIAN_BITFIELD
 #endif
 
+#include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/swab.h>
 
index 601c904..cd98982 100644 (file)
@@ -9,6 +9,7 @@
 #define __LITTLE_ENDIAN_BITFIELD
 #endif
 
+#include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/swab.h>
 
index c8cc46f..f106a39 100644 (file)
@@ -136,19 +136,21 @@ struct mptcp_info {
  * MPTCP_EVENT_REMOVED: token, rem_id
  * An address has been lost by the peer.
  *
- * MPTCP_EVENT_SUB_ESTABLISHED: token, family, saddr4 | saddr6,
- *                              daddr4 | daddr6, sport, dport, backup,
- *                              if_idx [, error]
+ * MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id,
+ *                              saddr4 | saddr6, daddr4 | daddr6, sport,
+ *                              dport, backup, if_idx [, error]
  * A new subflow has been established. 'error' should not be set.
  *
- * MPTCP_EVENT_SUB_CLOSED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- *                         sport, dport, backup, if_idx [, error]
+ * MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
+ *                         daddr4 | daddr6, sport, dport, backup, if_idx
+ *                         [, error]
  * A subflow has been closed. An error (copy of sk_err) could be set if an
  * error has been detected for this subflow.
  *
- * MPTCP_EVENT_SUB_PRIORITY: token, family, saddr4 | saddr6, daddr4 | daddr6,
- *                           sport, dport, backup, if_idx [, error]
- *       The priority of a subflow has changed. 'error' should not be set.
+ * MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
+ *                           daddr4 | daddr6, sport, dport, backup, if_idx
+ *                           [, error]
+ * The priority of a subflow has changed. 'error' should not be set.
  */
 enum mptcp_event_type {
        MPTCP_EVENT_UNSPEC = 0,
index f6e3c8c..4fa4e97 100644 (file)
@@ -263,7 +263,7 @@ enum nfc_sdp_attr {
 #define NFC_SE_ENABLED  0x1
 
 struct sockaddr_nfc {
-       sa_family_t sa_family;
+       __kernel_sa_family_t sa_family;
        __u32 dev_idx;
        __u32 target_idx;
        __u32 nfc_protocol;
@@ -271,14 +271,14 @@ struct sockaddr_nfc {
 
 #define NFC_LLCP_MAX_SERVICE_NAME 63
 struct sockaddr_nfc_llcp {
-       sa_family_t sa_family;
+       __kernel_sa_family_t sa_family;
        __u32 dev_idx;
        __u32 target_idx;
        __u32 nfc_protocol;
        __u8 dsap; /* Destination SAP, if known */
        __u8 ssap; /* Source SAP to be bound to */
        char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */;
-       size_t service_name_len;
+       __kernel_size_t service_name_len;
 };
 
 /* NFC socket protocols */
index 74ef57b..ac5d6a3 100644 (file)
@@ -66,10 +66,17 @@ struct rlimit64 {
 #define _STK_LIM       (8*1024*1024)
 
 /*
- * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
- * and other sensitive information are never written to disk.
+ * Limit the amount of locked memory by some sane default:
+ * root can always increase this limit if needed.
+ *
+ * The main use-cases are (1) preventing sensitive memory
+ * from being swapped; (2) real-time operations; (3) via
+ * IOURING_REGISTER_BUFFERS.
+ *
+ * The first two don't need much. The latter will take as
+ * much as it can get. 8MB is a reasonably sane default.
  */
-#define MLOCK_LIMIT    ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
+#define MLOCK_LIMIT    (8*1024*1024)
 
 /*
  * Due to binary compatibility, the actual resource numbers
index c204262..344081e 100644 (file)
@@ -17,6 +17,7 @@ struct xenbus_device;
 unsigned xen_evtchn_nr_channels(void);
 
 int bind_evtchn_to_irq(evtchn_port_t evtchn);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
 int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags, const char *devname,
index 121d37e..4cebadb 100644 (file)
@@ -718,7 +718,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
 {
        int rc = 0;
        struct sk_buff *skb;
-       static unsigned int failed = 0;
+       unsigned int failed = 0;
 
        /* NOTE: kauditd_thread takes care of all our locking, we just use
         *       the netlink info passed to us (e.g. sk and portid) */
@@ -735,32 +735,30 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
                        continue;
                }
 
+retry:
                /* grab an extra skb reference in case of error */
                skb_get(skb);
                rc = netlink_unicast(sk, skb, portid, 0);
                if (rc < 0) {
-                       /* fatal failure for our queue flush attempt? */
+                       /* send failed - try a few times unless fatal error */
                        if (++failed >= retry_limit ||
                            rc == -ECONNREFUSED || rc == -EPERM) {
-                               /* yes - error processing for the queue */
                                sk = NULL;
                                if (err_hook)
                                        (*err_hook)(skb);
-                               if (!skb_hook)
-                                       goto out;
-                               /* keep processing with the skb_hook */
+                               if (rc == -EAGAIN)
+                                       rc = 0;
+                               /* continue to drain the queue */
                                continue;
                        } else
-                               /* no - requeue to preserve ordering */
-                               skb_queue_head(queue, skb);
+                               goto retry;
                } else {
-                       /* it worked - drop the extra reference and continue */
+                       /* skb sent - drop the extra reference and continue */
                        consume_skb(skb);
                        failed = 0;
                }
        }
 
-out:
        return (rc >= 0 ? 0 : rc);
 }
 
@@ -1609,7 +1607,8 @@ static int __net_init audit_net_init(struct net *net)
                audit_panic("cannot initialize netlink socket in namespace");
                return -ENOMEM;
        }
-       aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+       /* limit the timeout in case auditd is blocked/stopped */
+       aunet->sk->sk_sndtimeo = HZ / 10;
 
        return 0;
 }
index dbc3ad0..9bdb037 100644 (file)
@@ -6346,11 +6346,6 @@ BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
 
 /* BTF ID set registration API for modules */
 
-struct kfunc_btf_id_list {
-       struct list_head list;
-       struct mutex mutex;
-};
-
 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
 
 void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
@@ -6376,8 +6371,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
 {
        struct kfunc_btf_id_set *s;
 
-       if (!owner)
-               return false;
        mutex_lock(&klist->mutex);
        list_for_each_entry(s, &klist->list, list) {
                if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) {
@@ -6389,8 +6382,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
        return false;
 }
 
-#endif
-
 #define DEFINE_KFUNC_BTF_ID_LIST(name)                                         \
        struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list),           \
                                          __MUTEX_INITIALIZER(name.mutex) };   \
@@ -6398,3 +6389,5 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
 
 DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list);
 DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list);
+
+#endif
index 50efda5..b532f10 100644 (file)
@@ -1366,22 +1366,28 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
        reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
 }
 
+static bool __reg32_bound_s64(s32 a)
+{
+       return a >= 0 && a <= S32_MAX;
+}
+
 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
 {
        reg->umin_value = reg->u32_min_value;
        reg->umax_value = reg->u32_max_value;
-       /* Attempt to pull 32-bit signed bounds into 64-bit bounds
-        * but must be positive otherwise set to worse case bounds
-        * and refine later from tnum.
+
+       /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
+        * be positive otherwise set to worse case bounds and refine later
+        * from tnum.
         */
-       if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
-               reg->smax_value = reg->s32_max_value;
-       else
-               reg->smax_value = U32_MAX;
-       if (reg->s32_min_value >= 0)
+       if (__reg32_bound_s64(reg->s32_min_value) &&
+           __reg32_bound_s64(reg->s32_max_value)) {
                reg->smin_value = reg->s32_min_value;
-       else
+               reg->smax_value = reg->s32_max_value;
+       } else {
                reg->smin_value = 0;
+               reg->smax_value = U32_MAX;
+       }
 }
 
 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
@@ -2379,8 +2385,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                 */
                if (insn->src_reg != BPF_REG_FP)
                        return 0;
-               if (BPF_SIZE(insn->code) != BPF_DW)
-                       return 0;
 
                /* dreg = *(u64 *)[fp - off] was a fill from the stack.
                 * that [fp - off] slot contains scalar that needs to be
@@ -2403,8 +2407,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                /* scalars can only be spilled into stack */
                if (insn->dst_reg != BPF_REG_FP)
                        return 0;
-               if (BPF_SIZE(insn->code) != BPF_DW)
-                       return 0;
                spi = (-insn->off - 1) / BPF_REG_SIZE;
                if (spi >= 64) {
                        verbose(env, "BUG spi %d\n", spi);
@@ -4551,9 +4553,16 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 
        if (insn->imm == BPF_CMPXCHG) {
                /* Check comparison of R0 with memory location */
-               err = check_reg_arg(env, BPF_REG_0, SRC_OP);
+               const u32 aux_reg = BPF_REG_0;
+
+               err = check_reg_arg(env, aux_reg, SRC_OP);
                if (err)
                        return err;
+
+               if (is_pointer_value(env, aux_reg)) {
+                       verbose(env, "R%d leaks addr into mem\n", aux_reg);
+                       return -EACCES;
+               }
        }
 
        if (is_pointer_value(env, insn->src_reg)) {
@@ -4588,13 +4597,19 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
                load_reg = -1;
        }
 
-       /* check whether we can read the memory */
+       /* Check whether we can read the memory, with second call for fetch
+        * case to simulate the register fill.
+        */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-                              BPF_SIZE(insn->code), BPF_READ, load_reg, true);
+                              BPF_SIZE(insn->code), BPF_READ, -1, true);
+       if (!err && load_reg >= 0)
+               err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+                                      BPF_SIZE(insn->code), BPF_READ, load_reg,
+                                      true);
        if (err)
                return err;
 
-       /* check whether we can write into the same memory */
+       /* Check whether we can write into the same memory. */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_WRITE, -1, true);
        if (err)
@@ -8308,6 +8323,10 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                                         insn->dst_reg);
                                }
                                zext_32_to_64(dst_reg);
+
+                               __update_reg_bounds(dst_reg);
+                               __reg_deduce_bounds(dst_reg);
+                               __reg_bound_offset(dst_reg);
                        }
                } else {
                        /* case: R = imm
@@ -8422,7 +8441,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 
        new_range = dst_reg->off;
        if (range_right_open)
-               new_range--;
+               new_range++;
 
        /* Examples for register markings:
         *
index bfbeabc..6e36e85 100644 (file)
@@ -65,6 +65,25 @@ static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc)
        return container_of(kfc, struct cgroup_fs_context, kfc);
 }
 
+struct cgroup_pidlist;
+
+struct cgroup_file_ctx {
+       struct cgroup_namespace *ns;
+
+       struct {
+               void                    *trigger;
+       } psi;
+
+       struct {
+               bool                    started;
+               struct css_task_iter    iter;
+       } procs;
+
+       struct {
+               struct cgroup_pidlist   *pidlist;
+       } procs1;
+};
+
 /*
  * A cgroup can be associated with multiple css_sets as different tasks may
  * belong to different cgroups on different hierarchies.  In the other
index 81c9e06..41e0837 100644 (file)
@@ -394,6 +394,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
         * next pid to display, if any
         */
        struct kernfs_open_file *of = s->private;
+       struct cgroup_file_ctx *ctx = of->priv;
        struct cgroup *cgrp = seq_css(s)->cgroup;
        struct cgroup_pidlist *l;
        enum cgroup_filetype type = seq_cft(s)->private;
@@ -403,25 +404,24 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
        mutex_lock(&cgrp->pidlist_mutex);
 
        /*
-        * !NULL @of->priv indicates that this isn't the first start()
-        * after open.  If the matching pidlist is around, we can use that.
-        * Look for it.  Note that @of->priv can't be used directly.  It
-        * could already have been destroyed.
+        * !NULL @ctx->procs1.pidlist indicates that this isn't the first
+        * start() after open. If the matching pidlist is around, we can use
+        * that. Look for it. Note that @ctx->procs1.pidlist can't be used
+        * directly. It could already have been destroyed.
         */
-       if (of->priv)
-               of->priv = cgroup_pidlist_find(cgrp, type);
+       if (ctx->procs1.pidlist)
+               ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);
 
        /*
         * Either this is the first start() after open or the matching
         * pidlist has been destroyed inbetween.  Create a new one.
         */
-       if (!of->priv) {
-               ret = pidlist_array_load(cgrp, type,
-                                        (struct cgroup_pidlist **)&of->priv);
+       if (!ctx->procs1.pidlist) {
+               ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
                if (ret)
                        return ERR_PTR(ret);
        }
-       l = of->priv;
+       l = ctx->procs1.pidlist;
 
        if (pid) {
                int end = l->length;
@@ -449,7 +449,8 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
 {
        struct kernfs_open_file *of = s->private;
-       struct cgroup_pidlist *l = of->priv;
+       struct cgroup_file_ctx *ctx = of->priv;
+       struct cgroup_pidlist *l = ctx->procs1.pidlist;
 
        if (l)
                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
@@ -460,7 +461,8 @@ static void cgroup_pidlist_stop(struct seq_file *s, void *v)
 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
 {
        struct kernfs_open_file *of = s->private;
-       struct cgroup_pidlist *l = of->priv;
+       struct cgroup_file_ctx *ctx = of->priv;
+       struct cgroup_pidlist *l = ctx->procs1.pidlist;
        pid_t *p = v;
        pid_t *end = l->list + l->length;
        /*
@@ -504,10 +506,11 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
                goto out_unlock;
 
        /*
-        * Even if we're attaching all tasks in the thread group, we only
-        * need to check permissions on one of them.
+        * Even if we're attaching all tasks in the thread group, we only need
+        * to check permissions on one of them. Check permissions using the
+        * credentials from file open to protect against inherited fd attacks.
         */
-       cred = current_cred();
+       cred = of->file->f_cred;
        tcred = get_task_cred(task);
        if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
            !uid_eq(cred->euid, tcred->uid) &&
index 919194d..cafb8c1 100644 (file)
@@ -3630,6 +3630,7 @@ static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
 static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
                                          size_t nbytes, enum psi_res res)
 {
+       struct cgroup_file_ctx *ctx = of->priv;
        struct psi_trigger *new;
        struct cgroup *cgrp;
        struct psi_group *psi;
@@ -3648,7 +3649,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
                return PTR_ERR(new);
        }
 
-       psi_trigger_replace(&of->priv, new);
+       psi_trigger_replace(&ctx->psi.trigger, new);
 
        cgroup_put(cgrp);
 
@@ -3679,12 +3680,16 @@ static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
 static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
                                          poll_table *pt)
 {
-       return psi_trigger_poll(&of->priv, of->file, pt);
+       struct cgroup_file_ctx *ctx = of->priv;
+
+       return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
 }
 
 static void cgroup_pressure_release(struct kernfs_open_file *of)
 {
-       psi_trigger_replace(&of->priv, NULL);
+       struct cgroup_file_ctx *ctx = of->priv;
+
+       psi_trigger_replace(&ctx->psi.trigger, NULL);
 }
 
 bool cgroup_psi_enabled(void)
@@ -3811,24 +3816,43 @@ static ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf,
 static int cgroup_file_open(struct kernfs_open_file *of)
 {
        struct cftype *cft = of_cft(of);
+       struct cgroup_file_ctx *ctx;
+       int ret;
 
-       if (cft->open)
-               return cft->open(of);
-       return 0;
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->ns = current->nsproxy->cgroup_ns;
+       get_cgroup_ns(ctx->ns);
+       of->priv = ctx;
+
+       if (!cft->open)
+               return 0;
+
+       ret = cft->open(of);
+       if (ret) {
+               put_cgroup_ns(ctx->ns);
+               kfree(ctx);
+       }
+       return ret;
 }
 
 static void cgroup_file_release(struct kernfs_open_file *of)
 {
        struct cftype *cft = of_cft(of);
+       struct cgroup_file_ctx *ctx = of->priv;
 
        if (cft->release)
                cft->release(of);
+       put_cgroup_ns(ctx->ns);
+       kfree(ctx);
 }
 
 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
                                 size_t nbytes, loff_t off)
 {
-       struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
+       struct cgroup_file_ctx *ctx = of->priv;
        struct cgroup *cgrp = of->kn->parent->priv;
        struct cftype *cft = of_cft(of);
        struct cgroup_subsys_state *css;
@@ -3845,7 +3869,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
         */
        if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) &&
            !(cft->flags & CFTYPE_NS_DELEGATABLE) &&
-           ns != &init_cgroup_ns && ns->root_cset->dfl_cgrp == cgrp)
+           ctx->ns != &init_cgroup_ns && ctx->ns->root_cset->dfl_cgrp == cgrp)
                return -EPERM;
 
        if (cft->write)
@@ -4751,21 +4775,21 @@ void css_task_iter_end(struct css_task_iter *it)
 
 static void cgroup_procs_release(struct kernfs_open_file *of)
 {
-       if (of->priv) {
-               css_task_iter_end(of->priv);
-               kfree(of->priv);
-       }
+       struct cgroup_file_ctx *ctx = of->priv;
+
+       if (ctx->procs.started)
+               css_task_iter_end(&ctx->procs.iter);
 }
 
 static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
 {
        struct kernfs_open_file *of = s->private;
-       struct css_task_iter *it = of->priv;
+       struct cgroup_file_ctx *ctx = of->priv;
 
        if (pos)
                (*pos)++;
 
-       return css_task_iter_next(it);
+       return css_task_iter_next(&ctx->procs.iter);
 }
 
 static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
@@ -4773,21 +4797,18 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
 {
        struct kernfs_open_file *of = s->private;
        struct cgroup *cgrp = seq_css(s)->cgroup;
-       struct css_task_iter *it = of->priv;
+       struct cgroup_file_ctx *ctx = of->priv;
+       struct css_task_iter *it = &ctx->procs.iter;
 
        /*
         * When a seq_file is seeked, it's always traversed sequentially
         * from position 0, so we can simply keep iterating on !0 *pos.
         */
-       if (!it) {
+       if (!ctx->procs.started) {
                if (WARN_ON_ONCE((*pos)))
                        return ERR_PTR(-EINVAL);
-
-               it = kzalloc(sizeof(*it), GFP_KERNEL);
-               if (!it)
-                       return ERR_PTR(-ENOMEM);
-               of->priv = it;
                css_task_iter_start(&cgrp->self, iter_flags, it);
+               ctx->procs.started = true;
        } else if (!(*pos)) {
                css_task_iter_end(it);
                css_task_iter_start(&cgrp->self, iter_flags, it);
@@ -4838,9 +4859,9 @@ static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb)
 
 static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
                                         struct cgroup *dst_cgrp,
-                                        struct super_block *sb)
+                                        struct super_block *sb,
+                                        struct cgroup_namespace *ns)
 {
-       struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
        struct cgroup *com_cgrp = src_cgrp;
        int ret;
 
@@ -4869,11 +4890,12 @@ static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
 
 static int cgroup_attach_permissions(struct cgroup *src_cgrp,
                                     struct cgroup *dst_cgrp,
-                                    struct super_block *sb, bool threadgroup)
+                                    struct super_block *sb, bool threadgroup,
+                                    struct cgroup_namespace *ns)
 {
        int ret = 0;
 
-       ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb);
+       ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb, ns);
        if (ret)
                return ret;
 
@@ -4890,8 +4912,10 @@ static int cgroup_attach_permissions(struct cgroup *src_cgrp,
 static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
                                    bool threadgroup)
 {
+       struct cgroup_file_ctx *ctx = of->priv;
        struct cgroup *src_cgrp, *dst_cgrp;
        struct task_struct *task;
+       const struct cred *saved_cred;
        ssize_t ret;
        bool locked;
 
@@ -4909,9 +4933,16 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
        src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
        spin_unlock_irq(&css_set_lock);
 
-       /* process and thread migrations follow same delegation rule */
+       /*
+        * Process and thread migrations follow same delegation rule. Check
+        * permissions using the credentials from file open to protect against
+        * inherited fd attacks.
+        */
+       saved_cred = override_creds(of->file->f_cred);
        ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
-                                       of->file->f_path.dentry->d_sb, threadgroup);
+                                       of->file->f_path.dentry->d_sb,
+                                       threadgroup, ctx->ns);
+       revert_creds(saved_cred);
        if (ret)
                goto out_finish;
 
@@ -6130,7 +6161,8 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
                goto err;
 
        ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
-                                       !(kargs->flags & CLONE_THREAD));
+                                       !(kargs->flags & CLONE_THREAD),
+                                       current->nsproxy->cgroup_ns);
        if (ret)
                goto err;
 
index eb53f5e..256cf6d 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/buildid.h>
 #include <linux/crash_core.h>
+#include <linux/init.h>
 #include <linux/utsname.h>
 #include <linux/vmalloc.h>
 
@@ -295,6 +296,16 @@ int __init parse_crashkernel_low(char *cmdline,
                                "crashkernel=", suffix_tbl[SUFFIX_LOW]);
 }
 
+/*
+ * Add a dummy early_param handler to mark crashkernel= as a known command line
+ * parameter and suppress incorrect warnings in init/main.c.
+ */
+static int __init parse_crashkernel_dummy(char *arg)
+{
+       return 0;
+}
+early_param("crashkernel", parse_crashkernel_dummy);
+
 Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
                          void *data, size_t data_len)
 {
index 6f29bf4..f0862eb 100644 (file)
@@ -451,7 +451,7 @@ static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
 
 }
 
-struct irq_domain_ops irq_generic_chip_ops = {
+const struct irq_domain_ops irq_generic_chip_ops = {
        .map    = irq_map_generic_chip,
        .unmap  = irq_unmap_generic_chip,
        .xlate  = irq_domain_xlate_onetwocell,
index 7405e38..f23ffd3 100644 (file)
@@ -486,7 +486,8 @@ int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
 }
 EXPORT_SYMBOL_GPL(irq_force_affinity);
 
-int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+                             bool setaffinity)
 {
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
@@ -495,12 +496,11 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
                return -EINVAL;
        desc->affinity_hint = m;
        irq_put_desc_unlock(desc, flags);
-       /* set the initial affinity to prevent every interrupt being on CPU0 */
-       if (m)
+       if (m && setaffinity)
                __irq_set_affinity(irq, m, false);
        return 0;
 }
-EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
 
 static void irq_affinity_notify(struct work_struct *work)
 {
index 0c6a48d..1f25a4d 100644 (file)
@@ -1380,7 +1380,7 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
                 *  - the VCPU on which owner runs is preempted
                 */
                if (!owner->on_cpu || need_resched() ||
-                   rt_mutex_waiter_is_top_waiter(lock, waiter) ||
+                   !rt_mutex_waiter_is_top_waiter(lock, waiter) ||
                    vcpu_is_preempted(task_cpu(owner))) {
                        res = false;
                        break;
index 76577d1..eca3810 100644 (file)
@@ -238,6 +238,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);     /* For internal use only */
 
+void __wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+       __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
+       /* POLLFREE must have cleared the queue. */
+       WARN_ON_ONCE(waitqueue_active(wq_head));
+}
+
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
  * because we need a memory barrier there on SMP, so that any
index a629b11..dfcee38 100644 (file)
@@ -4185,6 +4185,15 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
                                ss_mode != 0))
                        return -EINVAL;
 
+               /*
+                * Return before taking any locks if no actual
+                * sigaltstack changes were requested.
+                */
+               if (t->sas_ss_sp == (unsigned long)ss_sp &&
+                   t->sas_ss_size == ss_size &&
+                   t->sas_ss_flags == ss_flags)
+                       return 0;
+
                sigaltstack_lock();
                if (ss_mode == SS_DISABLE) {
                        ss_size = 0;
index b348749..dcdcb85 100644 (file)
@@ -1306,8 +1306,7 @@ int do_settimeofday64(const struct timespec64 *ts)
        timekeeping_forward_now(tk);
 
        xt = tk_xtime(tk);
-       ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
-       ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
+       ts_delta = timespec64_sub(*ts, xt);
 
        if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
                ret = -EINVAL;
index e3d2c23..85f1021 100644 (file)
@@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs)
 EXPORT_SYMBOL(msleep_interruptible);
 
 /**
- * usleep_range - Sleep for an approximate time
- * @min: Minimum time in usecs to sleep
- * @max: Maximum time in usecs to sleep
+ * usleep_range_state - Sleep for an approximate time in a given state
+ * @min:       Minimum time in usecs to sleep
+ * @max:       Maximum time in usecs to sleep
+ * @state:     State of the current task that will be while sleeping
  *
  * In non-atomic context where the exact wakeup time is flexible, use
- * usleep_range() instead of udelay().  The sleep improves responsiveness
+ * usleep_range_state() instead of udelay().  The sleep improves responsiveness
  * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
  * power usage by allowing hrtimers to take advantage of an already-
  * scheduled interrupt instead of scheduling a new one just for this sleep.
  */
-void __sched usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range_state(unsigned long min, unsigned long max,
+                               unsigned int state)
 {
        ktime_t exp = ktime_add_us(ktime_get(), min);
        u64 delta = (u64)(max - min) * NSEC_PER_USEC;
 
        for (;;) {
-               __set_current_state(TASK_UNINTERRUPTIBLE);
+               __set_current_state(state);
                /* Do not return before the requested sleep time has elapsed */
                if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
                        break;
        }
 }
-EXPORT_SYMBOL(usleep_range);
+EXPORT_SYMBOL(usleep_range_state);
index 30bc880..be5f6b3 100644 (file)
@@ -5217,6 +5217,7 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
 {
        struct ftrace_direct_func *direct;
        struct ftrace_func_entry *entry;
+       struct ftrace_hash *hash;
        int ret = -ENODEV;
 
        mutex_lock(&direct_mutex);
@@ -5225,7 +5226,8 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
        if (!entry)
                goto out_unlock;
 
-       if (direct_functions->count == 1)
+       hash = direct_ops.func_hash->filter_hash;
+       if (hash->count == 1)
                unregister_ftrace_function(&direct_ops);
 
        ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
@@ -5540,6 +5542,10 @@ int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
        err = unregister_ftrace_function(ops);
        remove_direct_functions_hash(hash, addr);
        mutex_unlock(&direct_mutex);
+
+       /* cleanup for possible another register call */
+       ops->func = NULL;
+       ops->trampoline = 0;
        return err;
 }
 EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
index 88de94d..78ea542 100644 (file)
@@ -3207,7 +3207,7 @@ struct trace_buffer_struct {
        char buffer[4][TRACE_BUF_SIZE];
 };
 
-static struct trace_buffer_struct *trace_percpu_buffer;
+static struct trace_buffer_struct __percpu *trace_percpu_buffer;
 
 /*
  * This allows for lockless recording.  If we're nested too deeply, then
@@ -3217,7 +3217,7 @@ static char *get_trace_buf(void)
 {
        struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
 
-       if (!buffer || buffer->nesting >= 4)
+       if (!trace_percpu_buffer || buffer->nesting >= 4)
                return NULL;
 
        buffer->nesting++;
@@ -3236,7 +3236,7 @@ static void put_trace_buf(void)
 
 static int alloc_percpu_trace_buffer(void)
 {
-       struct trace_buffer_struct *buffers;
+       struct trace_buffer_struct __percpu *buffers;
 
        if (trace_percpu_buffer)
                return 0;
index 22db3ce..ca9c13b 100644 (file)
@@ -1237,9 +1237,8 @@ static int __create_synth_event(const char *name, const char *raw_fields)
                                                  argv + consumed, &consumed,
                                                  &field_version);
                        if (IS_ERR(field)) {
-                               argv_free(argv);
                                ret = PTR_ERR(field);
-                               goto err;
+                               goto err_free_arg;
                        }
 
                        /*
@@ -1262,18 +1261,19 @@ static int __create_synth_event(const char *name, const char *raw_fields)
                        if (cmd_version > 1 && n_fields_this_loop >= 1) {
                                synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
                                ret = -EINVAL;
-                               goto err;
+                               goto err_free_arg;
                        }
 
                        fields[n_fields++] = field;
                        if (n_fields == SYNTH_FIELDS_MAX) {
                                synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
                                ret = -EINVAL;
-                               goto err;
+                               goto err_free_arg;
                        }
 
                        n_fields_this_loop++;
                }
+               argv_free(argv);
 
                if (consumed < argc) {
                        synth_err(SYNTH_ERR_INVALID_CMD, 0);
@@ -1281,7 +1281,6 @@ static int __create_synth_event(const char *name, const char *raw_fields)
                        goto err;
                }
 
-               argv_free(argv);
        }
 
        if (n_fields == 0) {
@@ -1307,6 +1306,8 @@ static int __create_synth_event(const char *name, const char *raw_fields)
        kfree(saved_fields);
 
        return ret;
+ err_free_arg:
+       argv_free(argv);
  err:
        for (i = 0; i < n_fields; i++)
                free_synth_field(fields[i]);
index 4f5613d..7b32c35 100644 (file)
@@ -264,15 +264,16 @@ void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
 long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
 {
        struct ucounts *iter;
+       long max = LONG_MAX;
        long ret = 0;
 
        for (iter = ucounts; iter; iter = iter->ns->ucounts) {
-               long max = READ_ONCE(iter->ns->ucount_max[type]);
                long new = atomic_long_add_return(v, &iter->ucount[type]);
                if (new < 0 || new > max)
                        ret = LONG_MAX;
                else if (iter == ucounts)
                        ret = new;
+               max = READ_ONCE(iter->ns->ucount_max[type]);
        }
        return ret;
 }
@@ -312,15 +313,16 @@ long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
 {
        /* Caller must hold a reference to ucounts */
        struct ucounts *iter;
+       long max = LONG_MAX;
        long dec, ret = 0;
 
        for (iter = ucounts; iter; iter = iter->ns->ucounts) {
-               long max = READ_ONCE(iter->ns->ucount_max[type]);
                long new = atomic_long_add_return(1, &iter->ucount[type]);
                if (new < 0 || new > max)
                        goto unwind;
                if (iter == ucounts)
                        ret = new;
+               max = READ_ONCE(iter->ns->ucount_max[type]);
                /*
                 * Grab an extra ucount reference for the caller when
                 * the rlimit count was previously 0.
@@ -339,15 +341,16 @@ unwind:
        return 0;
 }
 
-bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
+bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long rlimit)
 {
        struct ucounts *iter;
-       if (get_ucounts_value(ucounts, type) > max)
-               return true;
+       long max = rlimit;
+       if (rlimit > LONG_MAX)
+               max = LONG_MAX;
        for (iter = ucounts; iter; iter = iter->ns->ucounts) {
-               max = READ_ONCE(iter->ns->ucount_max[type]);
                if (get_ucounts_value(iter, type) > max)
                        return true;
+               max = READ_ONCE(iter->ns->ucount_max[type]);
        }
        return false;
 }
index 5c12bde..5e14e32 100644 (file)
@@ -316,6 +316,7 @@ config DEBUG_INFO_BTF
        bool "Generate BTF typeinfo"
        depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
        depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
+       depends on BPF_SYSCALL
        help
          Generate deduplicated BTF type information from DWARF debug info.
          Turning this on expects presence of pahole tool, which will convert
index 28edafc..356f4f2 100644 (file)
@@ -428,7 +428,7 @@ config THP_SWAP
 # UP and nommu archs use km based percpu allocator
 #
 config NEED_PER_CPU_KM
-       depends on !SMP
+       depends on !SMP || !MMU
        bool
        default y
 
index 1eead47..eae96df 100644 (file)
@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
        wb_shutdown(&bdi->wb);
        cgwb_bdi_unregister(bdi);
 
+       /*
+        * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
+        * update the global bdi_min_ratio.
+        */
+       if (bdi->min_ratio)
+               bdi_set_min_ratio(bdi, 0);
+
        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
index c381b3c..e924978 100644 (file)
@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
        for (i = 0; i < nr_ids; i++) {
                t = damon_new_target(ids[i]);
                if (!t) {
-                       pr_err("Failed to alloc damon_target\n");
                        /* The caller should do cleanup of the ids itself */
                        damon_for_each_target_safe(t, next, ctx)
                                damon_destroy_target(t);
@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
                    unsigned long aggr_int, unsigned long primitive_upd_int,
                    unsigned long min_nr_reg, unsigned long max_nr_reg)
 {
-       if (min_nr_reg < 3) {
-               pr_err("min_nr_regions (%lu) must be at least 3\n",
-                               min_nr_reg);
+       if (min_nr_reg < 3)
                return -EINVAL;
-       }
-       if (min_nr_reg > max_nr_reg) {
-               pr_err("invalid nr_regions.  min (%lu) > max (%lu)\n",
-                               min_nr_reg, max_nr_reg);
+       if (min_nr_reg > max_nr_reg)
                return -EINVAL;
-       }
 
        ctx->sample_interval = sample_int;
        ctx->aggr_interval = aggr_int;
@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
 
 static void kdamond_usleep(unsigned long usecs)
 {
-       if (usecs > 100 * 1000)
-               schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+       /* See Documentation/timers/timers-howto.rst for the thresholds */
+       if (usecs > 20 * USEC_PER_MSEC)
+               schedule_timeout_idle(usecs_to_jiffies(usecs));
        else
-               usleep_range(usecs, usecs + 1);
+               usleep_idle_range(usecs, usecs + 1);
 }
 
 /* Returns negative error code if it's not activated but should return */
@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
                                ctx->callback.after_sampling(ctx))
                        done = true;
 
-               usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+               kdamond_usleep(ctx->sample_interval);
 
                if (ctx->primitive.check_accesses)
                        max_nr_accesses = ctx->primitive.check_accesses(ctx);
index 9b520bb..ad65436 100644 (file)
@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
                                &wmarks.low, &parsed);
                if (ret != 18)
                        break;
-               if (!damos_action_valid(action)) {
-                       pr_err("wrong action %d\n", action);
+               if (!damos_action_valid(action))
                        goto fail;
-               }
 
                pos += parsed;
                scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
@@ -355,6 +353,7 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
                const char __user *buf, size_t count, loff_t *ppos)
 {
        struct damon_ctx *ctx = file->private_data;
+       struct damon_target *t, *next_t;
        bool id_is_pid = true;
        char *kbuf, *nrs;
        unsigned long *targets;
@@ -399,8 +398,12 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
                goto unlock_out;
        }
 
-       /* remove targets with previously-set primitive */
-       damon_set_targets(ctx, NULL, 0);
+       /* remove previously set targets */
+       damon_for_each_target_safe(t, next_t, ctx) {
+               if (targetid_is_pid(ctx))
+                       put_pid((struct pid *)t->id);
+               damon_destroy_target(t);
+       }
 
        /* Configure the context for the address space type */
        if (id_is_pid)
@@ -652,10 +655,12 @@ static void dbgfs_before_terminate(struct damon_ctx *ctx)
        if (!targetid_is_pid(ctx))
                return;
 
+       mutex_lock(&ctx->kdamond_lock);
        damon_for_each_target_safe(t, next, ctx) {
                put_pid((struct pid *)t->id);
                damon_destroy_target(t);
        }
+       mutex_unlock(&ctx->kdamond_lock);
 }
 
 static struct damon_ctx *dbgfs_new_ctx(void)
index ecfd0b2..6a1b927 100644 (file)
@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                                struct damon_addr_range *three_regions,
                                unsigned long *expected, int nr_expected)
 {
-       struct damon_ctx *ctx = damon_new_ctx();
        struct damon_target *t;
        struct damon_region *r;
        int i;
@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
                damon_add_region(r, t);
        }
-       damon_add_target(ctx, t);
 
        damon_va_apply_three_regions(t, three_regions);
 
@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
                KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
        }
-
-       damon_destroy_ctx(ctx);
 }
 
 /*
@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
                        new_three_regions, expected, ARRAY_SIZE(expected));
 }
 
-static void damon_test_split_evenly(struct kunit *test)
+static void damon_test_split_evenly_fail(struct kunit *test,
+               unsigned long start, unsigned long end, unsigned int nr_pieces)
 {
-       struct damon_ctx *c = damon_new_ctx();
-       struct damon_target *t;
-       struct damon_region *r;
-       unsigned long i;
-
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
-                       -EINVAL);
-
-       t = damon_new_target(42);
-       r = damon_new_region(0, 100);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+       struct damon_target *t = damon_new_target(42);
+       struct damon_region *r = damon_new_region(start, end);
 
        damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
+       KUNIT_EXPECT_EQ(test,
+                       damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
+       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
 
-       i = 0;
        damon_for_each_region(r, t) {
-               KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
-               KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
+               KUNIT_EXPECT_EQ(test, r->ar.start, start);
+               KUNIT_EXPECT_EQ(test, r->ar.end, end);
        }
+
        damon_free_target(t);
+}
+
+static void damon_test_split_evenly_succ(struct kunit *test,
+       unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+       struct damon_target *t = damon_new_target(42);
+       struct damon_region *r = damon_new_region(start, end);
+       unsigned long expected_width = (end - start) / nr_pieces;
+       unsigned long i = 0;
 
-       t = damon_new_target(42);
-       r = damon_new_region(5, 59);
        damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+       KUNIT_EXPECT_EQ(test,
+                       damon_va_evenly_split_region(t, r, nr_pieces), 0);
+       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
 
-       i = 0;
        damon_for_each_region(r, t) {
-               if (i == 4)
+               if (i == nr_pieces - 1)
                        break;
-               KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
-               KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
+               KUNIT_EXPECT_EQ(test,
+                               r->ar.start, start + i++ * expected_width);
+               KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
        }
-       KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
-       KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
+       KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
+       KUNIT_EXPECT_EQ(test, r->ar.end, end);
        damon_free_target(t);
+}
 
-       t = damon_new_target(42);
-       r = damon_new_region(5, 6);
-       damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+static void damon_test_split_evenly(struct kunit *test)
+{
+       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+                       -EINVAL);
 
-       damon_for_each_region(r, t) {
-               KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
-               KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
-       }
-       damon_free_target(t);
-       damon_destroy_ctx(c);
+       damon_test_split_evenly_fail(test, 0, 100, 0);
+       damon_test_split_evenly_succ(test, 0, 100, 10);
+       damon_test_split_evenly_succ(test, 5, 59, 5);
+       damon_test_split_evenly_fail(test, 5, 6, 2);
 }
 
 static struct kunit_case damon_test_cases[] = {
index 35fe490..20a9a9d 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/pagewalk.h>
+#include <linux/sched/mm.h>
 
 #include "prmtv-common.h"
 
@@ -626,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
        case DAMOS_STAT:
                return 0;
        default:
-               pr_warn("Wrong action %d\n", scheme->action);
                return -EINVAL;
        }
 
index daa0e23..39c4c46 100644 (file)
@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page,
                        goto skip;
                if (!PageUptodate(page) || PageReadahead(page))
                        goto skip;
-               if (PageHWPoison(page))
-                       goto skip;
                if (!trylock_page(page))
                        goto skip;
                if (page->mapping != mapping)
index abcd178..a1baa19 100644 (file)
@@ -2973,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
        struct huge_bootmem_page *m = NULL; /* initialize for clang */
        int nr_nodes, node;
 
-       if (nid >= nr_online_nodes)
+       if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
                return 0;
        /* do node specific alloc */
        if (nid != NUMA_NO_NODE) {
index 0994578..a19154a 100644 (file)
@@ -683,6 +683,7 @@ static const struct file_operations objects_fops = {
        .open = open_objects,
        .read = seq_read,
        .llseek = seq_lseek,
+       .release = seq_release,
 };
 
 static int __init kfence_debugfs_init(void)
index 6863a83..2ed5f2a 100644 (file)
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
        rcu_read_unlock();
 }
 
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
-                                    struct pglist_data *pgdat,
-                                    enum node_stat_item idx, int nr)
-{
-       struct mem_cgroup *memcg;
-       struct lruvec *lruvec;
-
-       rcu_read_lock();
-       memcg = obj_cgroup_memcg(objcg);
-       lruvec = mem_cgroup_lruvec(memcg, pgdat);
-       mod_memcg_lruvec_state(lruvec, idx, nr);
-       rcu_read_unlock();
-}
-
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 }
 #endif
 
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
-       struct memcg_stock_pcp *stock;
-
-       if (likely(in_task())) {
-               *pflags = 0UL;
-               preempt_disable();
-               stock = this_cpu_ptr(&memcg_stock);
-               return &stock->task_obj;
-       }
-
-       local_irq_save(*pflags);
-       stock = this_cpu_ptr(&memcg_stock);
-       return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
-       if (likely(in_task()))
-               preempt_enable();
-       else
-               local_irq_restore(flags);
-}
-
 /**
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ retry:
  */
 #define OBJCGS_CLEAR_MASK      (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
 
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+       struct memcg_stock_pcp *stock;
+
+       if (likely(in_task())) {
+               *pflags = 0UL;
+               preempt_disable();
+               stock = this_cpu_ptr(&memcg_stock);
+               return &stock->task_obj;
+       }
+
+       local_irq_save(*pflags);
+       stock = this_cpu_ptr(&memcg_stock);
+       return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+       if (likely(in_task()))
+               preempt_enable();
+       else
+               local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+                                    struct pglist_data *pgdat,
+                                    enum node_stat_item idx, int nr)
+{
+       struct mem_cgroup *memcg;
+       struct lruvec *lruvec;
+
+       rcu_read_lock();
+       memcg = obj_cgroup_memcg(objcg);
+       lruvec = mem_cgroup_lruvec(memcg, pgdat);
+       mod_memcg_lruvec_state(lruvec, idx, nr);
+       rcu_read_unlock();
+}
+
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
                                 gfp_t gfp, bool new_page)
 {
index 07c875f..3a27446 100644 (file)
@@ -1470,17 +1470,12 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
        if (!(flags & MF_COUNT_INCREASED)) {
                res = get_hwpoison_page(p, flags);
                if (!res) {
-                       /*
-                        * Check "filter hit" and "race with other subpage."
-                        */
                        lock_page(head);
-                       if (PageHWPoison(head)) {
-                               if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
-                                   || (p != head && TestSetPageHWPoison(head))) {
+                       if (hwpoison_filter(p)) {
+                               if (TestClearPageHWPoison(head))
                                        num_poisoned_pages_dec();
-                                       unlock_page(head);
-                                       return 0;
-                               }
+                               unlock_page(head);
+                               return 0;
                        }
                        unlock_page(head);
                        res = MF_FAILED;
@@ -2239,6 +2234,7 @@ retry:
        } else if (ret == 0) {
                if (soft_offline_free_page(page) && try_again) {
                        try_again = false;
+                       flags &= ~MF_COUNT_INCREASED;
                        goto retry;
                }
        }
index 852041f..2a9627d 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/memblock.h>
 #include <linux/compaction.h>
 #include <linux/rmap.h>
+#include <linux/module.h>
 
 #include <asm/tlbflush.h>
 
index 10e9c87..f6248af 100644 (file)
@@ -2140,8 +2140,7 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                         * memory with both reclaim and compact as well.
                         */
                        if (!page && (gfp & __GFP_DIRECT_RECLAIM))
-                               page = __alloc_pages_node(hpage_node,
-                                                               gfp, order);
+                               page = __alloc_pages(gfp, order, hpage_node, nmask);
 
                        goto out;
                }
index a862682..abe7db5 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5081,6 +5081,7 @@ struct loc_track {
        unsigned long max;
        unsigned long count;
        struct location *loc;
+       loff_t idx;
 };
 
 static struct dentry *slab_debugfs_root;
@@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init);
 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
 static int slab_debugfs_show(struct seq_file *seq, void *v)
 {
-
-       struct location *l;
-       unsigned int idx = *(unsigned int *)v;
        struct loc_track *t = seq->private;
+       struct location *l;
+       unsigned long idx;
 
+       idx = (unsigned long) t->idx;
        if (idx < t->count) {
                l = &t->loc[idx];
 
@@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
        struct loc_track *t = seq->private;
 
-       v = ppos;
-       ++*ppos;
+       t->idx = ++(*ppos);
        if (*ppos <= t->count)
-               return v;
+               return ppos;
 
        return NULL;
 }
 
 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 {
+       struct loc_track *t = seq->private;
+
+       t->idx = *ppos;
        return ppos;
 }
 
index 16f706c..2b55318 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/swap_slots.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
index fb95846..700434d 100644 (file)
@@ -1021,6 +1021,39 @@ static void handle_write_error(struct address_space *mapping,
        unlock_page(page);
 }
 
+static bool skip_throttle_noprogress(pg_data_t *pgdat)
+{
+       int reclaimable = 0, write_pending = 0;
+       int i;
+
+       /*
+        * If kswapd is disabled, reschedule if necessary but do not
+        * throttle as the system is likely near OOM.
+        */
+       if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
+               return true;
+
+       /*
+        * If there are a lot of dirty/writeback pages then do not
+        * throttle as throttling will occur when the pages cycle
+        * towards the end of the LRU if still under writeback.
+        */
+       for (i = 0; i < MAX_NR_ZONES; i++) {
+               struct zone *zone = pgdat->node_zones + i;
+
+               if (!populated_zone(zone))
+                       continue;
+
+               reclaimable += zone_reclaimable_pages(zone);
+               write_pending += zone_page_state_snapshot(zone,
+                                                 NR_ZONE_WRITE_PENDING);
+       }
+       if (2 * write_pending <= reclaimable)
+               return true;
+
+       return false;
+}
+
 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
 {
        wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
@@ -1056,8 +1089,16 @@ void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
                }
 
                break;
+       case VMSCAN_THROTTLE_CONGESTED:
+               fallthrough;
        case VMSCAN_THROTTLE_NOPROGRESS:
-               timeout = HZ/2;
+               if (skip_throttle_noprogress(pgdat)) {
+                       cond_resched();
+                       return;
+               }
+
+               timeout = 1;
+
                break;
        case VMSCAN_THROTTLE_ISOLATED:
                timeout = HZ/50;
@@ -3321,7 +3362,7 @@ again:
        if (!current_is_kswapd() && current_may_throttle() &&
            !sc->hibernation_mode &&
            test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
-               reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
+               reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
 
        if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
                                    sc))
@@ -3386,16 +3427,16 @@ static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
        }
 
        /*
-        * Do not throttle kswapd on NOPROGRESS as it will throttle on
-        * VMSCAN_THROTTLE_WRITEBACK if there are too many pages under
-        * writeback and marked for immediate reclaim at the tail of
-        * the LRU.
+        * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will
+        * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages
+        * under writeback and marked for immediate reclaim at the tail of the
+        * LRU.
         */
-       if (current_is_kswapd())
+       if (current_is_kswapd() || cgroup_reclaim(sc))
                return;
 
        /* Throttle if making no progress at high prioities. */
-       if (sc->priority < DEF_PRIORITY - 2)
+       if (sc->priority == 1 && !sc->nr_reclaimed)
                reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
 }
 
@@ -3415,6 +3456,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
        unsigned long nr_soft_scanned;
        gfp_t orig_mask;
        pg_data_t *last_pgdat = NULL;
+       pg_data_t *first_pgdat = NULL;
 
        /*
         * If the number of buffer_heads in the machine exceeds the maximum
@@ -3478,14 +3520,19 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                        /* need some check for avoid more shrink_zone() */
                }
 
+               if (!first_pgdat)
+                       first_pgdat = zone->zone_pgdat;
+
                /* See comment about same check for global reclaim above */
                if (zone->zone_pgdat == last_pgdat)
                        continue;
                last_pgdat = zone->zone_pgdat;
                shrink_node(zone->zone_pgdat, sc);
-               consider_reclaim_throttle(zone->zone_pgdat, sc);
        }
 
+       if (first_pgdat)
+               consider_reclaim_throttle(first_pgdat, sc);
+
        /*
         * Restore to original mask to avoid the impact on the caller if we
         * promoted it to __GFP_HIGHMEM.
index 2f34bbd..cfca99e 100644 (file)
@@ -85,8 +85,10 @@ static void ax25_kill_by_device(struct net_device *dev)
 again:
        ax25_for_each(s, &ax25_list) {
                if (s->ax25_dev == ax25_dev) {
-                       s->ax25_dev = NULL;
                        spin_unlock_bh(&ax25_list_lock);
+                       lock_sock(s->sk);
+                       s->ax25_dev = NULL;
+                       release_sock(s->sk);
                        ax25_disconnect(s, ENETUNREACH);
                        spin_lock_bh(&ax25_list_lock);
 
index 433901d..f4004cf 100644 (file)
@@ -1339,6 +1339,7 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: The multicast packet to check
  * @orig: an originator to be set to forward the skb to
+ * @is_routable: stores whether the destination is routable
  *
  * Return: the forwarding mode as enum batadv_forw_mode and in case of
  * BATADV_FORW_SINGLE set the orig to the single originator the skb
@@ -1346,17 +1347,16 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
  */
 enum batadv_forw_mode
 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
-                      struct batadv_orig_node **orig)
+                      struct batadv_orig_node **orig, int *is_routable)
 {
        int ret, tt_count, ip_count, unsnoop_count, total_count;
        bool is_unsnoopable = false;
        unsigned int mcast_fanout;
        struct ethhdr *ethhdr;
-       int is_routable = 0;
        int rtr_count = 0;
 
        ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
-                                          &is_routable);
+                                          is_routable);
        if (ret == -ENOMEM)
                return BATADV_FORW_NONE;
        else if (ret < 0)
@@ -1369,7 +1369,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
        ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
        unsnoop_count = !is_unsnoopable ? 0 :
                        atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
-       rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);
+       rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
 
        total_count = tt_count + ip_count + unsnoop_count + rtr_count;
 
@@ -1689,6 +1689,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the multicast packet to transmit
  * @vid: the vlan identifier
+ * @is_routable: stores whether the destination is routable
  *
  * Sends copies of a frame with multicast destination to any node that signaled
  * interest in it, that is either via the translation table or the according
@@ -1701,7 +1702,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
  * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
  */
 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
-                          unsigned short vid)
+                          unsigned short vid, int is_routable)
 {
        int ret;
 
@@ -1717,12 +1718,16 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
                return ret;
        }
 
+       if (!is_routable)
+               goto skip_mc_router;
+
        ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
        if (ret != NET_XMIT_SUCCESS) {
                kfree_skb(skb);
                return ret;
        }
 
+skip_mc_router:
        consume_skb(skb);
        return ret;
 }
index 9fee5da..8aec818 100644 (file)
@@ -43,7 +43,8 @@ enum batadv_forw_mode {
 
 enum batadv_forw_mode
 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
-                      struct batadv_orig_node **mcast_single_orig);
+                      struct batadv_orig_node **mcast_single_orig,
+                      int *is_routable);
 
 int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
                                struct sk_buff *skb,
@@ -51,7 +52,7 @@ int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
                                struct batadv_orig_node *orig_node);
 
 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
-                          unsigned short vid);
+                          unsigned short vid, int is_routable);
 
 void batadv_mcast_init(struct batadv_priv *bat_priv);
 
@@ -68,7 +69,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
 
 static inline enum batadv_forw_mode
 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
-                      struct batadv_orig_node **mcast_single_orig)
+                      struct batadv_orig_node **mcast_single_orig,
+                      int *is_routable)
 {
        return BATADV_FORW_ALL;
 }
@@ -85,7 +87,7 @@ batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
 
 static inline int
 batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
-                      unsigned short vid)
+                      unsigned short vid, int is_routable)
 {
        kfree_skb(skb);
        return NET_XMIT_DROP;
index 7ee0933..2dbbe6c 100644 (file)
@@ -198,6 +198,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
        int gw_mode;
        enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
        struct batadv_orig_node *mcast_single_orig = NULL;
+       int mcast_is_routable = 0;
        int network_offset = ETH_HLEN;
        __be16 proto;
 
@@ -300,7 +301,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
 send:
                if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
                        forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
-                                                          &mcast_single_orig);
+                                                          &mcast_single_orig,
+                                                          &mcast_is_routable);
                        if (forw_mode == BATADV_FORW_NONE)
                                goto dropped;
 
@@ -359,7 +361,8 @@ send:
                        ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
                                                          mcast_single_orig);
                } else if (forw_mode == BATADV_FORW_SOME) {
-                       ret = batadv_mcast_forw_send(bat_priv, skb, vid);
+                       ret = batadv_mcast_forw_send(bat_priv, skb, vid,
+                                                    mcast_is_routable);
                } else {
                        if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
                                                                  skb))
index db4ab2c..891cfcf 100644 (file)
@@ -337,7 +337,7 @@ static int old_deviceless(struct net *net, void __user *uarg)
 
                args[2] = get_bridge_ifindices(net, indices, args[2]);
 
-               ret = copy_to_user(uarg, indices,
+               ret = copy_to_user((void __user *)args[1], indices,
                                   array_size(args[2], sizeof(int)))
                        ? -EFAULT : args[2];
 
index f3d7511..de24098 100644 (file)
@@ -4522,6 +4522,38 @@ int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
 }
 #endif
 
+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+                                 unsigned long val)
+{
+       unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+       if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
+               br_info(brmctx->br,
+                       "trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
+                       jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
+                       jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
+               intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+       }
+
+       brmctx->multicast_query_interval = intvl_jiffies;
+}
+
+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+                                         unsigned long val)
+{
+       unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+       if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
+               br_info(brmctx->br,
+                       "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
+                       jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
+                       jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
+               intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+       }
+
+       brmctx->multicast_startup_query_interval = intvl_jiffies;
+}
+
 /**
  * br_multicast_list_adjacent - Returns snooped multicast addresses
  * @dev:       The bridge port adjacent to which to retrieve addresses
index 0c8b5f1..2ff83d8 100644 (file)
@@ -1357,7 +1357,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
        if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
                u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
 
-               br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+               br_multicast_set_query_intvl(&br->multicast_ctx, val);
        }
 
        if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -1369,7 +1369,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
        if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
                u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
 
-               br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+               br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
        }
 
        if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
index c0efd69..e8c6ee3 100644 (file)
@@ -28,6 +28,8 @@
 #define BR_MAX_PORTS   (1<<BR_PORT_BITS)
 
 #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
 
 #define BR_HWDOM_MAX BITS_PER_LONG
 
@@ -963,6 +965,10 @@ int br_multicast_dump_querier_state(struct sk_buff *skb,
                                    int nest_attr);
 size_t br_multicast_querier_state_size(void);
 size_t br_rports_size(const struct net_bridge_mcast *brmctx);
+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+                                 unsigned long val);
+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+                                         unsigned long val);
 
 static inline bool br_group_is_l2(const struct br_ip *group)
 {
@@ -1147,9 +1153,9 @@ br_multicast_port_ctx_get_global(const struct net_bridge_mcast_port *pmctx)
 static inline bool
 br_multicast_ctx_vlan_global_disabled(const struct net_bridge_mcast *brmctx)
 {
-       return br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
-              br_multicast_ctx_is_vlan(brmctx) &&
-              !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED);
+       return br_multicast_ctx_is_vlan(brmctx) &&
+              (!br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) ||
+               !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED));
 }
 
 static inline bool
index d9a89dd..7b0c197 100644 (file)
@@ -658,7 +658,7 @@ static ssize_t multicast_query_interval_show(struct device *d,
 static int set_query_interval(struct net_bridge *br, unsigned long val,
                              struct netlink_ext_ack *extack)
 {
-       br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+       br_multicast_set_query_intvl(&br->multicast_ctx, val);
        return 0;
 }
 
@@ -706,7 +706,7 @@ static ssize_t multicast_startup_query_interval_show(
 static int set_startup_query_interval(struct net_bridge *br, unsigned long val,
                                      struct netlink_ext_ack *extack)
 {
-       br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+       br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
        return 0;
 }
 
index 8ffd4ed..a638297 100644 (file)
@@ -521,7 +521,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
                u64 val;
 
                val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
-               v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+               br_multicast_set_query_intvl(&v->br_mcast_ctx, val);
                *changed = true;
        }
        if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -535,7 +535,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
                u64 val;
 
                val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
-               v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+               br_multicast_set_startup_query_intvl(&v->br_mcast_ctx, val);
                *changed = true;
        }
        if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {
index 2a352e6..c4708e2 100644 (file)
@@ -3941,8 +3941,8 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
                return skb;
 
        /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
-       qdisc_skb_cb(skb)->mru = 0;
-       qdisc_skb_cb(skb)->post_ct = false;
+       tc_skb_cb(skb)->mru = 0;
+       tc_skb_cb(skb)->post_ct = false;
        mini_qdisc_bstats_cpu_update(miniq, skb);
 
        switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
@@ -5103,8 +5103,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
        }
 
        qdisc_skb_cb(skb)->pkt_len = skb->len;
-       qdisc_skb_cb(skb)->mru = 0;
-       qdisc_skb_cb(skb)->post_ct = false;
+       tc_skb_cb(skb)->mru = 0;
+       tc_skb_cb(skb)->post_ct = false;
        skb->tc_at_ingress = 1;
        mini_qdisc_bstats_cpu_update(miniq, skb);
 
index 5ad72db..c06c9ba 100644 (file)
@@ -4110,14 +4110,6 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
                return err;
        }
 
-       if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
-           info->attrs[DEVLINK_ATTR_NETNS_FD] ||
-           info->attrs[DEVLINK_ATTR_NETNS_ID]) {
-               dest_net = devlink_netns_get(skb, info);
-               if (IS_ERR(dest_net))
-                       return PTR_ERR(dest_net);
-       }
-
        if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
                action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
        else
@@ -4160,6 +4152,14 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
                }
        }
+       if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
+           info->attrs[DEVLINK_ATTR_NETNS_FD] ||
+           info->attrs[DEVLINK_ATTR_NETNS_ID]) {
+               dest_net = devlink_netns_get(skb, info);
+               if (IS_ERR(dest_net))
+                       return PTR_ERR(dest_net);
+       }
+
        err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
 
        if (dest_net)
index 3255f57..1b094c4 100644 (file)
@@ -238,7 +238,7 @@ void
 skb_flow_dissect_ct(const struct sk_buff *skb,
                    struct flow_dissector *flow_dissector,
                    void *target_container, u16 *ctinfo_map,
-                   size_t mapsize, bool post_ct)
+                   size_t mapsize, bool post_ct, u16 zone)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
        struct flow_dissector_key_ct *key;
@@ -260,6 +260,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
        if (!ct) {
                key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
                                TCA_FLOWER_KEY_CT_FLAGS_INVALID;
+               key->ct_zone = zone;
                return;
        }
 
index 2820aca..9ccd64e 100644 (file)
@@ -197,6 +197,10 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining,
                        nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
 
                        if (nla_entype) {
+                               if (nla_len(nla_entype) < sizeof(u16)) {
+                                       NL_SET_ERR_MSG(extack, "Invalid RTA_ENCAP_TYPE");
+                                       return -EINVAL;
+                               }
                                encap_type = nla_get_u16(nla_entype);
 
                                if (lwtunnel_valid_encap_type(encap_type,
index 72ba027..dda12fb 100644 (file)
@@ -763,11 +763,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 
        ASSERT_RTNL();
 
-       n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
+       n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;
 
-       n->protocol = 0;
        write_pnet(&n->net, net);
        memcpy(n->key, pkey, key_len);
        n->dev = dev;
index ba2f382..909db87 100644 (file)
@@ -832,7 +832,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
               ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
 
        if (dev)
-               printk("%sdev name=%s feat=0x%pNF\n",
+               printk("%sdev name=%s feat=%pNF\n",
                       level, dev->name, &dev->features);
        if (sk)
                printk("%ssk family=%hu type=%u proto=%u\n",
index 1ae52ac..8eb671c 100644 (file)
@@ -1124,6 +1124,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
 
 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
 {
+       psock_set_prog(&psock->progs.stream_parser, NULL);
+
        if (!psock->saved_data_ready)
                return;
 
@@ -1212,6 +1214,9 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
 
 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
 {
+       psock_set_prog(&psock->progs.stream_verdict, NULL);
+       psock_set_prog(&psock->progs.skb_verdict, NULL);
+
        if (!psock->saved_data_ready)
                return;
 
index f39ef79..4ca4b11 100644 (file)
@@ -167,8 +167,11 @@ static void sock_map_del_link(struct sock *sk,
                write_lock_bh(&sk->sk_callback_lock);
                if (strp_stop)
                        sk_psock_stop_strp(sk, psock);
-               else
+               if (verdict_stop)
                        sk_psock_stop_verdict(sk, psock);
+
+               if (psock->psock_update_sk_prot)
+                       psock->psock_update_sk_prot(sk, psock, false);
                write_unlock_bh(&sk->sk_callback_lock);
        }
 }
@@ -282,6 +285,12 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
 
        if (msg_parser)
                psock_set_prog(&psock->progs.msg_parser, msg_parser);
+       if (stream_parser)
+               psock_set_prog(&psock->progs.stream_parser, stream_parser);
+       if (stream_verdict)
+               psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
+       if (skb_verdict)
+               psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 
        ret = sock_map_init_proto(sk, psock);
        if (ret < 0)
@@ -292,14 +301,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
                ret = sk_psock_init_strp(sk, psock);
                if (ret)
                        goto out_unlock_drop;
-               psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
-               psock_set_prog(&psock->progs.stream_parser, stream_parser);
                sk_psock_start_strp(sk, psock);
        } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
-               psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
                sk_psock_start_verdict(sk,psock);
        } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
-               psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
                sk_psock_start_verdict(sk, psock);
        }
        write_unlock_bh(&sk->sk_callback_lock);
index de1c849..4ed74d5 100644 (file)
@@ -47,9 +47,13 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
        void *injection;
        __be32 *prefix;
        u32 rew_op = 0;
+       u64 qos_class;
 
        ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
 
+       qos_class = netdev_get_num_tc(netdev) ?
+                   netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
+
        injection = skb_push(skb, OCELOT_TAG_LEN);
        prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN);
 
@@ -57,7 +61,7 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
        memset(injection, 0, OCELOT_TAG_LEN);
        ocelot_ifh_set_bypass(injection, 1);
        ocelot_ifh_set_src(injection, ds->num_ports);
-       ocelot_ifh_set_qos_class(injection, skb->priority);
+       ocelot_ifh_set_qos_class(injection, qos_class);
        ocelot_ifh_set_vlan_tci(injection, vlan_tci);
        ocelot_ifh_set_tag_type(injection, tag_type);
 
index 38b44c0..96f4180 100644 (file)
@@ -40,7 +40,8 @@ int ethnl_ops_begin(struct net_device *dev)
        if (dev->dev.parent)
                pm_runtime_get_sync(dev->dev.parent);
 
-       if (!netif_device_present(dev)) {
+       if (!netif_device_present(dev) ||
+           dev->reg_state == NETREG_UNREGISTERING) {
                ret = -ENODEV;
                goto err;
        }
index 0189e3c..5f70ffd 100644 (file)
@@ -154,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)
 
        kfree(rcu_dereference_protected(inet->inet_opt, 1));
        dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
-       dst_release(sk->sk_rx_dst);
+       dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
        sk_refcnt_debug_dec(sk);
 }
 EXPORT_SYMBOL(inet_sock_destruct);
@@ -1994,6 +1994,10 @@ static int __init inet_init(void)
 
        ip_init();
 
+       /* Initialise per-cpu ipv4 mibs */
+       if (init_ipv4_mibs())
+               panic("%s: Cannot init ipv4 mibs\n", __func__);
+
        /* Setup TCP slab cache for open requests. */
        tcp_init();
 
@@ -2024,12 +2028,6 @@ static int __init inet_init(void)
 
        if (init_inet_pernet_ops())
                pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
-       /*
-        *      Initialise per-cpu ipv4 mibs
-        */
-
-       if (init_ipv4_mibs())
-               pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
 
        ipv4_proc_init();
 
index fde7797..92c29ab 100644 (file)
@@ -662,6 +662,19 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
        return nhs;
 }
 
+static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla,
+                           struct netlink_ext_ack *extack)
+{
+       if (nla_len(nla) < sizeof(*gw)) {
+               NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY");
+               return -EINVAL;
+       }
+
+       *gw = nla_get_in_addr(nla);
+
+       return 0;
+}
+
 /* only called when fib_nh is integrated into fib_info */
 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                       int remaining, struct fib_config *cfg,
@@ -704,7 +717,11 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                                return -EINVAL;
                        }
                        if (nla) {
-                               fib_cfg.fc_gw4 = nla_get_in_addr(nla);
+                               ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla,
+                                                      extack);
+                               if (ret)
+                                       goto errout;
+
                                if (fib_cfg.fc_gw4)
                                        fib_cfg.fc_gw_family = AF_INET;
                        } else if (nlav) {
@@ -714,10 +731,18 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                        }
 
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
-                       if (nla)
+                       if (nla) {
+                               if (nla_len(nla) < sizeof(u32)) {
+                                       NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
+                                       return -EINVAL;
+                               }
                                fib_cfg.fc_flow = nla_get_u32(nla);
+                       }
 
                        fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+                       /* RTA_ENCAP_TYPE length checked in
+                        * lwtunnel_valid_encap_type_attr
+                        */
                        nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
                        if (nla)
                                fib_cfg.fc_encap_type = nla_get_u16(nla);
@@ -902,6 +927,7 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
                attrlen = rtnh_attrlen(rtnh);
                if (attrlen > 0) {
                        struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
+                       int err;
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        nlav = nla_find(attrs, attrlen, RTA_VIA);
@@ -912,12 +938,17 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
                        }
 
                        if (nla) {
+                               __be32 gw;
+
+                               err = fib_gw_from_attr(&gw, nla, extack);
+                               if (err)
+                                       return err;
+
                                if (nh->fib_nh_gw_family != AF_INET ||
-                                   nla_get_in_addr(nla) != nh->fib_nh_gw4)
+                                   gw != nh->fib_nh_gw4)
                                        return 1;
                        } else if (nlav) {
                                struct fib_config cfg2;
-                               int err;
 
                                err = fib_gw_from_via(&cfg2, nlav, extack);
                                if (err)
@@ -940,8 +971,14 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
-                       if (nla && nla_get_u32(nla) != nh->nh_tclassid)
-                               return 1;
+                       if (nla) {
+                               if (nla_len(nla) < sizeof(u32)) {
+                                       NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
+                                       return -EINVAL;
+                               }
+                               if (nla_get_u32(nla) != nh->nh_tclassid)
+                                       return 1;
+                       }
 #endif
                }
 
index f7fea3a..62a67fd 100644 (file)
@@ -721,7 +721,7 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
 
        sk_node_init(&nreq_sk->sk_node);
        nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
 #endif
        nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
index c8fa6e7..581b5b2 100644 (file)
@@ -261,6 +261,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        r->idiag_state = sk->sk_state;
        r->idiag_timer = 0;
        r->idiag_retrans = 0;
+       r->idiag_expires = 0;
 
        if (inet_diag_msg_attrs_fill(sk, skb, r, ext,
                                     sk_user_ns(NETLINK_CB(cb->skb).sk),
@@ -314,9 +315,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                r->idiag_retrans = icsk->icsk_probes_out;
                r->idiag_expires =
                        jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
-       } else {
-               r->idiag_timer = 0;
-               r->idiag_expires = 0;
        }
 
        if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
index bbb3d39..2bb28bf 100644 (file)
@@ -3012,8 +3012,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
-       dst_release(sk->sk_rx_dst);
-       sk->sk_rx_dst = NULL;
+       dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
        tcp_saved_syn_free(tp);
        tp->compressed_ack = 0;
        tp->segs_in = 0;
index 246ab7b..0ce4684 100644 (file)
@@ -5787,7 +5787,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
        trace_tcp_probe(sk, skb);
 
        tcp_mstamp_refresh(tp);
-       if (unlikely(!sk->sk_rx_dst))
+       if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
                inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
        /*
         *      Header prediction.
index 13d868c..084df22 100644 (file)
@@ -1701,7 +1701,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
        struct sock *rsk;
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-               struct dst_entry *dst = sk->sk_rx_dst;
+               struct dst_entry *dst;
+
+               dst = rcu_dereference_protected(sk->sk_rx_dst,
+                                               lockdep_sock_is_held(sk));
 
                sock_rps_save_rxhash(sk, skb);
                sk_mark_napi_id(sk, skb);
@@ -1709,8 +1712,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                        if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
                            !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
                                             dst, 0)) {
+                               RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
                                dst_release(dst);
-                               sk->sk_rx_dst = NULL;
                        }
                }
                tcp_rcv_established(sk, skb);
@@ -1786,7 +1789,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk_fullsock(sk)) {
-                       struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+                       struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
 
                        if (dst)
                                dst = dst_check(dst, 0);
@@ -2201,7 +2204,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
        struct dst_entry *dst = skb_dst(skb);
 
        if (dst && dst_hold_safe(dst)) {
-               sk->sk_rx_dst = dst;
+               rcu_assign_pointer(sk->sk_rx_dst, dst);
                sk->sk_rx_dst_ifindex = skb->skb_iif;
        }
 }
index cf913a6..7c2d3ac 100644 (file)
@@ -829,8 +829,8 @@ int tcp_child_process(struct sock *parent, struct sock *child,
        int ret = 0;
        int state = child->sk_state;
 
-       /* record NAPI ID of child */
-       sk_mark_napi_id(child, skb);
+       /* record sk_napi_id and sk_rx_queue_mapping of child. */
+       sk_mark_napi_id_set(child, skb);
 
        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
index 8bcecdd..0cd6b85 100644 (file)
@@ -916,7 +916,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
                        kfree_skb(skb);
                        return -EINVAL;
                }
-               if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+               if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
                        kfree_skb(skb);
                        return -EINVAL;
                }
@@ -2250,7 +2250,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
        struct dst_entry *old;
 
        if (dst_hold_safe(dst)) {
-               old = xchg(&sk->sk_rx_dst, dst);
+               old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst);
                dst_release(old);
                return old != dst;
        }
@@ -2440,7 +2440,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                struct dst_entry *dst = skb_dst(skb);
                int ret;
 
-               if (unlikely(sk->sk_rx_dst != dst))
+               if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
                        udp_sk_rx_dst_set(sk, dst);
 
                ret = udp_unicast_rcv_skb(sk, skb, uh);
@@ -2599,7 +2599,7 @@ int udp_v4_early_demux(struct sk_buff *skb)
 
        skb->sk = sk;
        skb->destructor = sock_efree;
-       dst = READ_ONCE(sk->sk_rx_dst);
+       dst = rcu_dereference(sk->sk_rx_dst);
 
        if (dst)
                dst = dst_check(dst, 0);
@@ -3075,7 +3075,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
 {
        seq_setwidth(seq, 127);
        if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
+               seq_puts(seq, "   sl  local_address rem_address   st tx_queue "
                           "rx_queue tr tm->when retrnsmt   uid  timeout "
                           "inode ref pointer drops");
        else {
index a7c31ab..96c5cc0 100644 (file)
@@ -57,6 +57,7 @@
 #include <net/protocol.h>
 #include <net/raw.h>
 #include <net/rawv6.h>
+#include <net/seg6.h>
 #include <net/transp_v6.h>
 #include <net/ip6_route.h>
 #include <net/addrconf.h>
@@ -820,6 +821,7 @@ out_bh_enable:
 
 void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 {
+       struct inet6_skb_parm *opt = IP6CB(skb);
        const struct inet6_protocol *ipprot;
        int inner_offset;
        __be16 frag_off;
@@ -829,6 +831,8 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto out;
 
+       seg6_icmp_srh(skb, opt);
+
        nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
        if (ipv6_ext_hdr(nexthdr)) {
                /* now skip over extension headers */
@@ -853,7 +857,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 
        ipprot = rcu_dereference(inet6_protos[nexthdr]);
        if (ipprot && ipprot->err_handler)
-               ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
+               ipprot->err_handler(skb, opt, type, code, inner_offset, info);
 
        raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
        return;
index 527e9ea..5e9474b 100644 (file)
@@ -808,6 +808,8 @@ vti6_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data
        struct net *net = dev_net(dev);
        struct vti6_net *ip6n = net_generic(net, vti6_net_id);
 
+       memset(&p1, 0, sizeof(p1));
+
        switch (cmd) {
        case SIOCGETTUNNEL:
                if (dev == ip6n->fb_tnl_dev) {
index 60f1e4f..c51d5ce 100644 (file)
@@ -1020,6 +1020,9 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
        struct raw6_sock *rp = raw6_sk(sk);
        int val;
 
+       if (optlen < sizeof(val))
+               return -EINVAL;
+
        if (copy_from_sockptr(&val, optval, sizeof(val)))
                return -EFAULT;
 
index 42d60c7..1deb629 100644 (file)
@@ -5224,6 +5224,19 @@ out:
        return should_notify;
 }
 
+static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
+                            struct netlink_ext_ack *extack)
+{
+       if (nla_len(nla) < sizeof(*gw)) {
+               NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
+               return -EINVAL;
+       }
+
+       *gw = nla_get_in6_addr(nla);
+
+       return 0;
+}
+
 static int ip6_route_multipath_add(struct fib6_config *cfg,
                                   struct netlink_ext_ack *extack)
 {
@@ -5264,10 +5277,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        if (nla) {
-                               r_cfg.fc_gateway = nla_get_in6_addr(nla);
+                               err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
+                                                       extack);
+                               if (err)
+                                       goto cleanup;
+
                                r_cfg.fc_flags |= RTF_GATEWAY;
                        }
                        r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+
+                       /* RTA_ENCAP_TYPE length checked in
+                        * lwtunnel_valid_encap_type_attr
+                        */
                        nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
                        if (nla)
                                r_cfg.fc_encap_type = nla_get_u16(nla);
@@ -5434,7 +5455,13 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
 
                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        if (nla) {
-                               nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+                               err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
+                                                       extack);
+                               if (err) {
+                                       last_err = err;
+                                       goto next_rtnh;
+                               }
+
                                r_cfg.fc_flags |= RTF_GATEWAY;
                        }
                }
@@ -5442,6 +5469,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
                if (err)
                        last_err = err;
 
+next_rtnh:
                rtnh = rtnh_next(rtnh, &remaining);
        }
 
index a8b5784..73aaabf 100644 (file)
@@ -75,6 +75,65 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced)
        return true;
 }
 
+struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags)
+{
+       struct ipv6_sr_hdr *srh;
+       int len, srhoff = 0;
+
+       if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, &flags) < 0)
+               return NULL;
+
+       if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
+               return NULL;
+
+       srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
+       len = (srh->hdrlen + 1) << 3;
+
+       if (!pskb_may_pull(skb, srhoff + len))
+               return NULL;
+
+       /* note that pskb_may_pull may change pointers in header;
+        * for this reason it is necessary to reload them when needed.
+        */
+       srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
+       if (!seg6_validate_srh(srh, len, true))
+               return NULL;
+
+       return srh;
+}
+
+/* Determine if an ICMP invoking packet contains a segment routing
+ * header.  If it does, extract the offset to the true destination
+ * address, which is in the first segment address.
+ */
+void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt)
+{
+       __u16 network_header = skb->network_header;
+       struct ipv6_sr_hdr *srh;
+
+       /* Update network header to point to the invoking packet
+        * inside the ICMP packet, so we can use the seg6_get_srh()
+        * helper.
+        */
+       skb_reset_network_header(skb);
+
+       srh = seg6_get_srh(skb, 0);
+       if (!srh)
+               goto out;
+
+       if (srh->type != IPV6_SRCRT_TYPE_4)
+               goto out;
+
+       opt->flags |= IP6SKB_SEG6;
+       opt->srhoff = (unsigned char *)srh - skb->data;
+
+out:
+       /* Restore the network header back to the ICMP packet */
+       skb->network_header = network_header;
+}
+
 static struct genl_family seg6_genl_family;
 
 static const struct nla_policy seg6_genl_policy[SEG6_ATTR_MAX + 1] = {
index 3adc5d9..d648550 100644 (file)
@@ -161,6 +161,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
                hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
 
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+               /* the control block has been erased, so we have to set the
+                * iif once again.
+                * We read the receiving interface index directly from the
+                * skb->skb_iif as it is done in the IPv4 receiving path (i.e.:
+                * ip_rcv_core(...)).
+                */
+               IP6CB(skb)->iif = skb->skb_iif;
        }
 
        hdr->nexthdr = NEXTHDR_ROUTING;
index 2dc40b3..ef88489 100644 (file)
@@ -150,40 +150,11 @@ static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt)
        return (struct seg6_local_lwt *)lwt->data;
 }
 
-static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb, int flags)
-{
-       struct ipv6_sr_hdr *srh;
-       int len, srhoff = 0;
-
-       if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, &flags) < 0)
-               return NULL;
-
-       if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
-               return NULL;
-
-       srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
-
-       len = (srh->hdrlen + 1) << 3;
-
-       if (!pskb_may_pull(skb, srhoff + len))
-               return NULL;
-
-       /* note that pskb_may_pull may change pointers in header;
-        * for this reason it is necessary to reload them when needed.
-        */
-       srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
-
-       if (!seg6_validate_srh(srh, len, true))
-               return NULL;
-
-       return srh;
-}
-
 static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb)
 {
        struct ipv6_sr_hdr *srh;
 
-       srh = get_srh(skb, IP6_FH_F_SKIP_RH);
+       srh = seg6_get_srh(skb, IP6_FH_F_SKIP_RH);
        if (!srh)
                return NULL;
 
@@ -200,7 +171,7 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
        struct ipv6_sr_hdr *srh;
        unsigned int off = 0;
 
-       srh = get_srh(skb, 0);
+       srh = seg6_get_srh(skb, 0);
        if (srh && srh->segments_left > 0)
                return false;
 
index 1b57ee3..8a3618a 100644 (file)
@@ -1933,7 +1933,6 @@ static int __net_init sit_init_net(struct net *net)
        return 0;
 
 err_reg_dev:
-       ipip6_dev_free(sitn->fb_tunnel_dev);
        free_netdev(sitn->fb_tunnel_dev);
 err_alloc_dev:
        return err;
index 551fce4..680e648 100644 (file)
@@ -107,7 +107,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
        if (dst && dst_hold_safe(dst)) {
                const struct rt6_info *rt = (const struct rt6_info *)dst;
 
-               sk->sk_rx_dst = dst;
+               rcu_assign_pointer(sk->sk_rx_dst, dst);
                sk->sk_rx_dst_ifindex = skb->skb_iif;
                sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
        }
@@ -1505,7 +1505,10 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-               struct dst_entry *dst = sk->sk_rx_dst;
+               struct dst_entry *dst;
+
+               dst = rcu_dereference_protected(sk->sk_rx_dst,
+                                               lockdep_sock_is_held(sk));
 
                sock_rps_save_rxhash(sk, skb);
                sk_mark_napi_id(sk, skb);
@@ -1513,8 +1516,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                        if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
                            INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
                                            dst, sk->sk_rx_dst_cookie) == NULL) {
+                               RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
                                dst_release(dst);
-                               sk->sk_rx_dst = NULL;
                        }
                }
 
@@ -1874,7 +1877,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk_fullsock(sk)) {
-                       struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+                       struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
 
                        if (dst)
                                dst = dst_check(dst, sk->sk_rx_dst_cookie);
index e43b31d..a0871c2 100644 (file)
@@ -40,6 +40,7 @@
 #include <net/transp_v6.h>
 #include <net/ip6_route.h>
 #include <net/raw.h>
+#include <net/seg6.h>
 #include <net/tcp_states.h>
 #include <net/ip6_checksum.h>
 #include <net/ip6_tunnel.h>
@@ -561,7 +562,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct ipv6_pinfo *np;
        const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
        const struct in6_addr *saddr = &hdr->saddr;
-       const struct in6_addr *daddr = &hdr->daddr;
+       const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
        struct udphdr *uh = (struct udphdr *)(skb->data+offset);
        bool tunnel = false;
        struct sock *sk;
@@ -956,7 +957,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                struct dst_entry *dst = skb_dst(skb);
                int ret;
 
-               if (unlikely(sk->sk_rx_dst != dst))
+               if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
                        udp6_sk_rx_dst_set(sk, dst);
 
                if (!uh->check && !udp_sk(sk)->no_check6_rx) {
@@ -1070,7 +1071,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
 
        skb->sk = sk;
        skb->destructor = sock_efree;
-       dst = READ_ONCE(sk->sk_rx_dst);
+       dst = rcu_dereference(sk->sk_rx_dst);
 
        if (dst)
                dst = dst_check(dst, sk->sk_rx_dst_cookie);
@@ -1204,7 +1205,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
                        kfree_skb(skb);
                        return -EINVAL;
                }
-               if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+               if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
                        kfree_skb(skb);
                        return -EINVAL;
                }
index 470ff0c..7d2925b 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 /**
@@ -191,7 +191,8 @@ static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata,
        sband = ieee80211_get_sband(sdata);
        if (!sband)
                return;
-       he_cap = ieee80211_get_he_iftype_cap(sband, sdata->vif.type);
+       he_cap = ieee80211_get_he_iftype_cap(sband,
+                                            ieee80211_vif_type_p2p(&sdata->vif));
        if (!he_cap)
                return;
 
index 430a585..74a878f 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -106,7 +106,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
        mgmt->u.action.u.addba_req.start_seq_num =
                                        cpu_to_le16(start_seq_num << 4);
 
-       ieee80211_tx_skb(sdata, skb);
+       ieee80211_tx_skb_tid(sdata, skb, tid);
 }
 
 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
@@ -213,6 +213,8 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
        struct ieee80211_txq *txq = sta->sta.txq[tid];
        struct txq_info *txqi;
 
+       lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
        if (!txq)
                return;
 
@@ -290,7 +292,6 @@ static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
        ieee80211_assign_tid_tx(sta, tid, NULL);
 
        ieee80211_agg_splice_finish(sta->sdata, tid);
-       ieee80211_agg_start_txq(sta, tid, false);
 
        kfree_rcu(tid_tx, rcu_head);
 }
@@ -480,8 +481,7 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
 
        /* send AddBA request */
        ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
-                                    tid_tx->dialog_token,
-                                    sta->tid_seq[tid] >> 4,
+                                    tid_tx->dialog_token, tid_tx->ssn,
                                     buf_size, tid_tx->timeout);
 
        WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state));
@@ -523,6 +523,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 
        params.ssn = sta->tid_seq[tid] >> 4;
        ret = drv_ampdu_action(local, sdata, &params);
+       tid_tx->ssn = params.ssn;
        if (ret == IEEE80211_AMPDU_TX_START_DELAY_ADDBA) {
                return;
        } else if (ret == IEEE80211_AMPDU_TX_START_IMMEDIATE) {
@@ -889,6 +890,7 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        bool send_delba = false;
+       bool start_txq = false;
 
        ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
               sta->sta.addr, tid);
@@ -906,10 +908,14 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
                send_delba = true;
 
        ieee80211_remove_tid_tx(sta, tid);
+       start_txq = true;
 
  unlock_sta:
        spin_unlock_bh(&sta->lock);
 
+       if (start_txq)
+               ieee80211_agg_start_txq(sta, tid, false);
+
        if (send_delba)
                ieee80211_send_delba(sdata, sta->sta.addr, tid,
                        WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
index bd3d319..2d0dd69 100644 (file)
@@ -1264,7 +1264,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
        return 0;
 
 error:
+       mutex_lock(&local->mtx);
        ieee80211_vif_release_channel(sdata);
+       mutex_unlock(&local->mtx);
+
        return err;
 }
 
index cd3731c..c336267 100644 (file)
@@ -1219,8 +1219,11 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
 
-       if (local->in_reconfig)
+       /* In reconfig don't transmit now, but mark for waking later */
+       if (local->in_reconfig) {
+               set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
                return;
+       }
 
        if (!check_sdata_in_driver(sdata))
                return;
index 5666bbb..482c98e 100644 (file)
@@ -647,6 +647,26 @@ struct mesh_csa_settings {
        struct cfg80211_csa_settings settings;
 };
 
+/**
+ * struct mesh_table
+ *
+ * @known_gates: list of known mesh gates and their mpaths by the station. The
+ * gate's mpath may or may not be resolved and active.
+ * @gates_lock: protects updates to known_gates
+ * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
+ * @entries: number of entries in the table
+ */
+struct mesh_table {
+       struct hlist_head known_gates;
+       spinlock_t gates_lock;
+       struct rhashtable rhead;
+       struct hlist_head walk_head;
+       spinlock_t walk_lock;
+       atomic_t entries;               /* Up to MAX_MESH_NEIGHBOURS */
+};
+
 struct ieee80211_if_mesh {
        struct timer_list housekeeping_timer;
        struct timer_list mesh_path_timer;
@@ -721,8 +741,8 @@ struct ieee80211_if_mesh {
        /* offset from skb->data while building IE */
        int meshconf_offset;
 
-       struct mesh_table *mesh_paths;
-       struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+       struct mesh_table mesh_paths;
+       struct mesh_table mpp_paths; /* Store paths for MPP&MAP */
        int mesh_paths_generation;
        int mpp_paths_generation;
 };
index 77080b4..b2b717a 100644 (file)
@@ -127,26 +127,6 @@ struct mesh_path {
        u32 path_change_count;
 };
 
-/**
- * struct mesh_table
- *
- * @known_gates: list of known mesh gates and their mpaths by the station. The
- * gate's mpath may or may not be resolved and active.
- * @gates_lock: protects updates to known_gates
- * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
- * @walk_head: linked list containing all mesh_path objects
- * @walk_lock: lock protecting walk_head
- * @entries: number of entries in the table
- */
-struct mesh_table {
-       struct hlist_head known_gates;
-       spinlock_t gates_lock;
-       struct rhashtable rhead;
-       struct hlist_head walk_head;
-       spinlock_t walk_lock;
-       atomic_t entries;               /* Up to MAX_MESH_NEIGHBOURS */
-};
-
 /* Recent multicast cache */
 /* RMC_BUCKETS must be a power of 2, maximum 256 */
 #define RMC_BUCKETS            256
@@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
 void mesh_path_flush_pending(struct mesh_path *mpath);
 void mesh_path_tx_pending(struct mesh_path *mpath);
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
 void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
 int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
 void mesh_path_timer(struct timer_list *t);
index 7cab1cf..acc1c29 100644 (file)
@@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr)
        mesh_path_free_rcu(tbl, mpath);
 }
 
-static struct mesh_table *mesh_table_alloc(void)
+static void mesh_table_init(struct mesh_table *tbl)
 {
-       struct mesh_table *newtbl;
+       INIT_HLIST_HEAD(&tbl->known_gates);
+       INIT_HLIST_HEAD(&tbl->walk_head);
+       atomic_set(&tbl->entries,  0);
+       spin_lock_init(&tbl->gates_lock);
+       spin_lock_init(&tbl->walk_lock);
 
-       newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
-       if (!newtbl)
-               return NULL;
-
-       INIT_HLIST_HEAD(&newtbl->known_gates);
-       INIT_HLIST_HEAD(&newtbl->walk_head);
-       atomic_set(&newtbl->entries,  0);
-       spin_lock_init(&newtbl->gates_lock);
-       spin_lock_init(&newtbl->walk_lock);
-       if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
-               kfree(newtbl);
-               return NULL;
-       }
-
-       return newtbl;
+       /* rhashtable_init() may fail only in case of wrong
+        * mesh_rht_params
+        */
+       WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
 }
 
 static void mesh_table_free(struct mesh_table *tbl)
 {
        rhashtable_free_and_destroy(&tbl->rhead,
                                    mesh_path_rht_free, tbl);
-       kfree(tbl);
 }
 
 /**
@@ -238,13 +230,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
 struct mesh_path *
 mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 {
-       return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
+       return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
 }
 
 struct mesh_path *
 mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 {
-       return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
+       return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
 }
 
 static struct mesh_path *
@@ -281,7 +273,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 struct mesh_path *
 mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
-       return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
+       return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
 }
 
 /**
@@ -296,7 +288,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 struct mesh_path *
 mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
-       return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
+       return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
 }
 
 /**
@@ -309,7 +301,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
        int err;
 
        rcu_read_lock();
-       tbl = mpath->sdata->u.mesh.mesh_paths;
+       tbl = &mpath->sdata->u.mesh.mesh_paths;
 
        spin_lock_bh(&mpath->state_lock);
        if (mpath->is_gate) {
@@ -418,7 +410,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
        if (!new_mpath)
                return ERR_PTR(-ENOMEM);
 
-       tbl = sdata->u.mesh.mesh_paths;
+       tbl = &sdata->u.mesh.mesh_paths;
        spin_lock_bh(&tbl->walk_lock);
        mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
                                                  &new_mpath->rhash,
@@ -460,7 +452,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
                return -ENOMEM;
 
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
-       tbl = sdata->u.mesh.mpp_paths;
+       tbl = &sdata->u.mesh.mpp_paths;
 
        spin_lock_bh(&tbl->walk_lock);
        ret = rhashtable_lookup_insert_fast(&tbl->rhead,
@@ -489,7 +481,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 void mesh_plink_broken(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+       struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
 
@@ -548,7 +540,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
 void mesh_path_flush_by_nexthop(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+       struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
        struct mesh_path *mpath;
        struct hlist_node *n;
 
@@ -563,7 +555,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
                               const u8 *proxy)
 {
-       struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
+       struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
        struct mesh_path *mpath;
        struct hlist_node *n;
 
@@ -597,8 +589,8 @@ static void table_flush_by_iface(struct mesh_table *tbl)
  */
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 {
-       table_flush_by_iface(sdata->u.mesh.mesh_paths);
-       table_flush_by_iface(sdata->u.mesh.mpp_paths);
+       table_flush_by_iface(&sdata->u.mesh.mesh_paths);
+       table_flush_by_iface(&sdata->u.mesh.mpp_paths);
 }
 
 /**
@@ -644,7 +636,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
        /* flush relevant mpp entries first */
        mpp_flush_by_proxy(sdata, addr);
 
-       err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
+       err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
        sdata->u.mesh.mesh_paths_generation++;
        return err;
 }
@@ -682,7 +674,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
        struct mesh_path *gate;
        bool copy = false;
 
-       tbl = sdata->u.mesh.mesh_paths;
+       tbl = &sdata->u.mesh.mesh_paths;
 
        rcu_read_lock();
        hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
@@ -762,29 +754,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
        mesh_path_tx_pending(mpath);
 }
 
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
 {
-       struct mesh_table *tbl_path, *tbl_mpp;
-       int ret;
-
-       tbl_path = mesh_table_alloc();
-       if (!tbl_path)
-               return -ENOMEM;
-
-       tbl_mpp = mesh_table_alloc();
-       if (!tbl_mpp) {
-               ret = -ENOMEM;
-               goto free_path;
-       }
-
-       sdata->u.mesh.mesh_paths = tbl_path;
-       sdata->u.mesh.mpp_paths = tbl_mpp;
-
-       return 0;
-
-free_path:
-       mesh_table_free(tbl_path);
-       return ret;
+       mesh_table_init(&sdata->u.mesh.mesh_paths);
+       mesh_table_init(&sdata->u.mesh.mpp_paths);
 }
 
 static
@@ -806,12 +779,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 {
-       mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
-       mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
+       mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
+       mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
 }
 
 void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
 {
-       mesh_table_free(sdata->u.mesh.mesh_paths);
-       mesh_table_free(sdata->u.mesh.mpp_paths);
+       mesh_table_free(&sdata->u.mesh.mesh_paths);
+       mesh_table_free(&sdata->u.mesh.mpp_paths);
 }
index 54ab0e1..3147ca8 100644 (file)
@@ -2452,11 +2452,18 @@ static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata,
                                           u16 tx_time)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       u16 tid = ieee80211_get_tid(hdr);
-       int ac = ieee80211_ac_from_tid(tid);
-       struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac];
+       u16 tid;
+       int ac;
+       struct ieee80211_sta_tx_tspec *tx_tspec;
        unsigned long now = jiffies;
 
+       if (!ieee80211_is_data_qos(hdr->frame_control))
+               return;
+
+       tid = ieee80211_get_tid(hdr);
+       ac = ieee80211_ac_from_tid(tid);
+       tx_tspec = &ifmgd->tx_tspec[ac];
+
        if (likely(!tx_tspec->admitted_time))
                return;
 
@@ -5258,7 +5265,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
         */
        if (new_sta) {
                u32 rates = 0, basic_rates = 0;
-               bool have_higher_than_11mbit;
+               bool have_higher_than_11mbit = false;
                int min_rate = INT_MAX, min_rate_index = -1;
                const struct cfg80211_bss_ies *ies;
                int shift = ieee80211_vif_get_shift(&sdata->vif);
index 9541a4c..0544563 100644 (file)
@@ -2944,6 +2944,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        if (!fwd_skb)
                goto out;
 
+       fwd_skb->dev = sdata->dev;
        fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
        fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
        info = IEEE80211_SKB_CB(fwd_skb);
index 51b49f0..537535a 100644 (file)
@@ -644,13 +644,13 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        /* check if STA exists already */
        if (sta_info_get_bss(sdata, sta->sta.addr)) {
                err = -EEXIST;
-               goto out_err;
+               goto out_cleanup;
        }
 
        sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
        if (!sinfo) {
                err = -ENOMEM;
-               goto out_err;
+               goto out_cleanup;
        }
 
        local->num_sta++;
@@ -667,6 +667,15 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 
        list_add_tail_rcu(&sta->list, &local->sta_list);
 
+       /* update channel context before notifying the driver about state
+        * change, this enables driver using the updated channel context right away.
+        */
+       if (sta->sta_state >= IEEE80211_STA_ASSOC) {
+               ieee80211_recalc_min_chandef(sta->sdata);
+               if (!sta->sta.support_p2p_ps)
+                       ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
+       }
+
        /* notify driver */
        err = sta_info_insert_drv_state(local, sdata, sta);
        if (err)
@@ -674,12 +683,6 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 
        set_sta_flag(sta, WLAN_STA_INSERTED);
 
-       if (sta->sta_state >= IEEE80211_STA_ASSOC) {
-               ieee80211_recalc_min_chandef(sta->sdata);
-               if (!sta->sta.support_p2p_ps)
-                       ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
-       }
-
        /* accept BA sessions now */
        clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
 
@@ -706,8 +709,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
  out_drop_sta:
        local->num_sta--;
        synchronize_net();
+ out_cleanup:
        cleanup_single_sta(sta);
- out_err:
        mutex_unlock(&local->sta_mtx);
        kfree(sinfo);
        rcu_read_lock();
index ba27967..379fd36 100644 (file)
@@ -176,6 +176,7 @@ struct sta_info;
  * @failed_bar_ssn: ssn of the last failed BAR tx attempt
  * @bar_pending: BAR needs to be re-sent
  * @amsdu: support A-MSDU withing A-MDPU
+ * @ssn: starting sequence number of the session
  *
  * This structure's lifetime is managed by RCU, assignments to
  * the array holding it must hold the aggregation mutex.
@@ -199,6 +200,7 @@ struct tid_ampdu_tx {
        u8 stop_initiator;
        bool tx_stop;
        u16 buf_size;
+       u16 ssn;
 
        u16 failed_bar_ssn;
        bool bar_pending;
index 278945e..86a54df 100644 (file)
@@ -1822,15 +1822,15 @@ static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
        ieee80211_tx_result res = TX_CONTINUE;
 
+       if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
+               CALL_TXH(ieee80211_tx_h_rate_ctrl);
+
        if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
                __skb_queue_tail(&tx->skbs, tx->skb);
                tx->skb = NULL;
                goto txh_done;
        }
 
-       if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
-               CALL_TXH(ieee80211_tx_h_rate_ctrl);
-
        CALL_TXH(ieee80211_tx_h_michael_mic_add);
        CALL_TXH(ieee80211_tx_h_sequence);
        CALL_TXH(ieee80211_tx_h_fragment);
@@ -4191,11 +4191,11 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 
        ieee80211_aggr_check(sdata, sta, skb);
 
+       sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
+
        if (sta) {
                struct ieee80211_fast_tx *fast_tx;
 
-               sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
-
                fast_tx = rcu_dereference(sta->fast_tx);
 
                if (fast_tx &&
index 43df2f0..0e4e195 100644 (file)
@@ -943,7 +943,12 @@ static void ieee80211_parse_extension_element(u32 *crc,
                                              struct ieee802_11_elems *elems)
 {
        const void *data = elem->data + 1;
-       u8 len = elem->datalen - 1;
+       u8 len;
+
+       if (!elem->datalen)
+               return;
+
+       len = elem->datalen - 1;
 
        switch (elem->data[0]) {
        case WLAN_EID_EXT_HE_MU_EDCA:
@@ -2063,7 +2068,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                chandef.chan = chan;
 
        skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len,
-                                    100 + ie_len);
+                                    local->scan_ies_len + ie_len);
        if (!skb)
                return NULL;
 
@@ -2646,6 +2651,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                mutex_unlock(&local->sta_mtx);
        }
 
+       /*
+        * If this is for hw restart things are still running.
+        * We may want to change that later, however.
+        */
+       if (local->open_count && (!suspended || reconfig_due_to_wowlan))
+               drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
+
        if (local->in_reconfig) {
                local->in_reconfig = false;
                barrier();
@@ -2664,13 +2676,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                                        IEEE80211_QUEUE_STOP_REASON_SUSPEND,
                                        false);
 
-       /*
-        * If this is for hw restart things are still running.
-        * We may want to change that later, however.
-        */
-       if (local->open_count && (!suspended || reconfig_due_to_wowlan))
-               drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
-
        if (!suspended)
                return 0;
 
index 5cc0421..6ad3e33 100644 (file)
@@ -85,8 +85,8 @@ void mctp_neigh_remove_dev(struct mctp_dev *mdev)
        mutex_unlock(&net->mctp.neigh_lock);
 }
 
-// TODO: add a "source" flag so netlink can only delete static neighbours?
-static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid)
+static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid,
+                            enum mctp_neigh_source source)
 {
        struct net *net = dev_net(mdev->dev);
        struct mctp_neigh *neigh, *tmp;
@@ -94,7 +94,8 @@ static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid)
 
        mutex_lock(&net->mctp.neigh_lock);
        list_for_each_entry_safe(neigh, tmp, &net->mctp.neighbours, list) {
-               if (neigh->dev == mdev && neigh->eid == eid) {
+               if (neigh->dev == mdev && neigh->eid == eid &&
+                   neigh->source == source) {
                        list_del_rcu(&neigh->list);
                        /* TODO: immediate RTM_DELNEIGH */
                        call_rcu(&neigh->rcu, __mctp_neigh_free);
@@ -202,7 +203,7 @@ static int mctp_rtm_delneigh(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (!mdev)
                return -ENODEV;
 
-       return mctp_neigh_remove(mdev, eid);
+       return mctp_neigh_remove(mdev, eid, MCTP_NEIGH_STATIC);
 }
 
 static int mctp_fill_neigh(struct sk_buff *skb, u32 portid, u32 seq, int event,
index 7b96be1..f523051 100644 (file)
@@ -700,6 +700,9 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
 
        msk_owned_by_me(msk);
 
+       if (sk->sk_state == TCP_LISTEN)
+               return;
+
        if (!rm_list->nr)
                return;
 
index c82a76d..54613f5 100644 (file)
@@ -1524,7 +1524,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
                        int ret = 0;
 
                        prev_ssk = ssk;
-                       mptcp_flush_join_list(msk);
+                       __mptcp_flush_join_list(msk);
                        ssk = mptcp_subflow_get_send(msk);
 
                        /* First check. If the ssk has changed since
@@ -2879,7 +2879,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
                 */
                if (WARN_ON_ONCE(!new_mptcp_sock)) {
                        tcp_sk(newsk)->is_mptcp = 0;
-                       return newsk;
+                       goto out;
                }
 
                /* acquire the 2nd reference for the owning socket */
@@ -2891,6 +2891,8 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
                                MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
        }
 
+out:
+       newsk->sk_kern_sock = kern;
        return newsk;
 }
 
index 0f1e661..f8efd47 100644 (file)
@@ -525,7 +525,6 @@ static bool mptcp_supported_sockopt(int level, int optname)
                case TCP_NODELAY:
                case TCP_THIN_LINEAR_TIMEOUTS:
                case TCP_CONGESTION:
-               case TCP_ULP:
                case TCP_CORK:
                case TCP_KEEPIDLE:
                case TCP_KEEPINTVL:
index bb5f165..c189b4c 100644 (file)
@@ -112,7 +112,11 @@ static int ncsi_write_package_info(struct sk_buff *skb,
                pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR);
                if (!pnest)
                        return -ENOMEM;
-               nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
+               rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
+               if (rc) {
+                       nla_nest_cancel(skb, pnest);
+                       return rc;
+               }
                if ((0x1 << np->id) == ndp->package_whitelist)
                        nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
                cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
index 770a631..4712a90 100644 (file)
@@ -684,7 +684,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
 
        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp) {
-               s32 timeout = ct->timeout - nfct_time_stamp;
+               s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
 
                tstamp->stop = ktime_get_real_ns();
                if (timeout < 0)
@@ -1036,7 +1036,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
        }
 
        /* We want the clashing entry to go away real soon: 1 second timeout. */
-       loser_ct->timeout = nfct_time_stamp + HZ;
+       WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
 
        /* IPS_NAT_CLASH removes the entry automatically on the first
         * reply.  Also prevents UDP tracker from moving the entry to
@@ -1560,7 +1560,7 @@ __nf_conntrack_alloc(struct net *net,
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
-       ct->timeout = 0;
+       WRITE_ONCE(ct->timeout, 0);
        write_pnet(&ct->ct_net, net);
        memset(&ct->__nfct_init_offset, 0,
               offsetof(struct nf_conn, proto) -
index c7708bd..ec4164c 100644 (file)
@@ -1195,8 +1195,6 @@ restart:
                }
                hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
                                           hnnode) {
-                       if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
-                               continue;
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (nf_ct_is_expired(ct)) {
                                if (i < ARRAY_SIZE(nf_ct_evict) &&
@@ -1208,6 +1206,9 @@ restart:
                        if (!net_eq(net, nf_ct_net(ct)))
                                continue;
 
+                       if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+                               continue;
+
                        if (cb->args[1]) {
                                if (ct != last)
                                        continue;
@@ -1998,7 +1999,7 @@ static int ctnetlink_change_timeout(struct nf_conn *ct,
 
        if (timeout > INT_MAX)
                timeout = INT_MAX;
-       ct->timeout = nfct_time_stamp + (u32)timeout;
+       WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
 
        if (test_bit(IPS_DYING_BIT, &ct->status))
                return -ETIME;
index 87a7388..ed37bb9 100644 (file)
@@ -201,8 +201,8 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
        if (timeout < 0)
                timeout = 0;
 
-       if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
-               ct->timeout = nfct_time_stamp + timeout;
+       if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
+               WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
 }
 
 static void flow_offload_fixup_ct_state(struct nf_conn *ct)
index c0851fe..c207728 100644 (file)
@@ -4481,9 +4481,9 @@ struct nft_set_elem_catchall {
 static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
                                     struct nft_set *set)
 {
-       struct nft_set_elem_catchall *catchall;
+       struct nft_set_elem_catchall *next, *catchall;
 
-       list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+       list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
                list_del_rcu(&catchall->list);
                nft_set_elem_destroy(set, catchall->elem, true);
                kfree_rcu(catchall);
index 691ef4c..7f83f96 100644 (file)
@@ -556,7 +556,8 @@ __build_packet_message(struct nfnl_log_net *log,
                goto nla_put_failure;
 
        if (indev && skb->dev &&
-           skb->mac_header != skb->network_header) {
+           skb_mac_header_was_set(skb) &&
+           skb_mac_header_len(skb) != 0) {
                struct nfulnl_msg_packet_hw phw;
                int len;
 
index 4acc4b8..f0b9e21 100644 (file)
@@ -387,7 +387,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        struct net_device *indev;
        struct net_device *outdev;
        struct nf_conn *ct = NULL;
-       enum ip_conntrack_info ctinfo;
+       enum ip_conntrack_info ctinfo = 0;
        struct nfnl_ct_hook *nfnl_ct;
        bool csum_verify;
        char *secdata = NULL;
@@ -560,7 +560,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                goto nla_put_failure;
 
        if (indev && entskb->dev &&
-           skb_mac_header_was_set(entskb)) {
+           skb_mac_header_was_set(entskb) &&
+           skb_mac_header_len(entskb) != 0) {
                struct nfqnl_msg_packet_hw phw;
                int len;
 
index af4ee87..dbe1f2e 100644 (file)
@@ -236,7 +236,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 
        tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
        if (!tcph)
-               return;
+               goto err;
 
        opt = (u8 *)tcph;
        for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
@@ -251,16 +251,16 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
                        continue;
 
                if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
-                       return;
+                       goto err;
 
                if (skb_ensure_writable(pkt->skb,
                                        nft_thoff(pkt) + i + priv->len))
-                       return;
+                       goto err;
 
                tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
                                              &tcphdr_len);
                if (!tcph)
-                       return;
+                       goto err;
 
                offset = i + priv->offset;
 
@@ -303,6 +303,9 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 
                return;
        }
+       return;
+err:
+       regs->verdict.code = NFT_BREAK;
 }
 
 static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
index e517663..6f4116e 100644 (file)
@@ -886,7 +886,7 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
                        NFT_PIPAPO_AVX2_BUCKET_LOAD8(4,  lt, 4, pkt[4], bsize);
 
                        NFT_PIPAPO_AVX2_AND(5, 0, 1);
-                       NFT_PIPAPO_AVX2_BUCKET_LOAD8(6,  lt, 6, pkt[5], bsize);
+                       NFT_PIPAPO_AVX2_BUCKET_LOAD8(6,  lt, 5, pkt[5], bsize);
                        NFT_PIPAPO_AVX2_AND(7, 2, 3);
 
                        /* Stall */
index 775064c..f1ba7dd 100644 (file)
@@ -306,7 +306,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
        if (optlen < sizeof(unsigned int))
                return -EINVAL;
 
-       if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
+       if (copy_from_sockptr(&opt, optval, sizeof(unsigned long)))
                return -EFAULT;
 
        switch (optname) {
index 334f63c..f184b0d 100644 (file)
@@ -636,8 +636,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
 {
        struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
 
-       nfc_device_iter_exit(iter);
-       kfree(iter);
+       if (iter) {
+               nfc_device_iter_exit(iter);
+               kfree(iter);
+       }
 
        return 0;
 }
@@ -1392,8 +1394,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
 {
        struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
 
-       nfc_device_iter_exit(iter);
-       kfree(iter);
+       if (iter) {
+               nfc_device_iter_exit(iter);
+               kfree(iter);
+       }
 
        return 0;
 }
index 9713035..6d262d9 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/mpls.h>
 #include <net/ndisc.h>
 #include <net/nsh.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 #include "conntrack.h"
 #include "datapath.h"
@@ -860,6 +861,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 #endif
        bool post_ct = false;
        int res, err;
+       u16 zone = 0;
 
        /* Extract metadata from packet. */
        if (tun_info) {
@@ -898,6 +900,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
                key->recirc_id = tc_ext ? tc_ext->chain : 0;
                OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
                post_ct = tc_ext ? tc_ext->post_ct : false;
+               zone = post_ct ? tc_ext->zone : 0;
        } else {
                key->recirc_id = 0;
        }
@@ -906,8 +909,11 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 #endif
 
        err = key_extract(skb, key);
-       if (!err)
+       if (!err) {
                ovs_ct_fill_key(skb, key, post_ct);   /* Must be after key_extract(). */
+               if (post_ct && !skb_get_nfct(skb))
+                       key->ct_zone = zone;
+       }
        return err;
 }
 
index 46943a1..76c2dca 100644 (file)
@@ -4492,9 +4492,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
 out_free_pg_vec:
-       bitmap_free(rx_owner_map);
-       if (pg_vec)
+       if (pg_vec) {
+               bitmap_free(rx_owner_map);
                free_pg_vec(pg_vec, order, req->tp_block_nr);
+       }
 out:
        return err;
 }
index a152591..65d463a 100644 (file)
@@ -868,6 +868,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
 
        err = pep_accept_conn(newsk, skb);
        if (err) {
+               __sock_put(sk);
                sock_put(newsk);
                newsk = NULL;
                goto drop;
@@ -946,6 +947,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
                        ret =  -EBUSY;
                else if (sk->sk_state == TCP_ESTABLISHED)
                        ret = -EISCONN;
+               else if (!pn->pn_sk.sobject)
+                       ret = -EADDRNOTAVAIL;
                else
                        ret = pep_sock_enable(sk, NULL, 0);
                release_sock(sk);
index a3bc4b5..b4cc699 100644 (file)
@@ -253,6 +253,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
                                 * should end up here, but if it
                                 * does, reset/destroy the connection.
                                 */
+                               kfree(conn->c_path);
                                kmem_cache_free(rds_conn_slab, conn);
                                conn = ERR_PTR(-EOPNOTSUPP);
                                goto out;
index 90866ae..ab35914 100644 (file)
@@ -690,10 +690,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
                                   u8 family, u16 zone, bool *defrag)
 {
        enum ip_conntrack_info ctinfo;
-       struct qdisc_skb_cb cb;
        struct nf_conn *ct;
        int err = 0;
        bool frag;
+       u16 mru;
 
        /* Previously seen (loopback)? Ignore. */
        ct = nf_ct_get(skb, &ctinfo);
@@ -708,7 +708,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
                return err;
 
        skb_get(skb);
-       cb = *qdisc_skb_cb(skb);
+       mru = tc_skb_cb(skb)->mru;
 
        if (family == NFPROTO_IPV4) {
                enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
@@ -722,7 +722,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 
                if (!err) {
                        *defrag = true;
-                       cb.mru = IPCB(skb)->frag_max_size;
+                       mru = IPCB(skb)->frag_max_size;
                }
        } else { /* NFPROTO_IPV6 */
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
@@ -735,7 +735,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 
                if (!err) {
                        *defrag = true;
-                       cb.mru = IP6CB(skb)->frag_max_size;
+                       mru = IP6CB(skb)->frag_max_size;
                }
 #else
                err = -EOPNOTSUPP;
@@ -744,7 +744,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
        }
 
        if (err != -EINPROGRESS)
-               *qdisc_skb_cb(skb) = cb;
+               tc_skb_cb(skb)->mru = mru;
        skb_clear_hash(skb);
        skb->ignore_df = 1;
        return err;
@@ -963,7 +963,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
        tcf_action_update_bstats(&c->common, skb);
 
        if (clear) {
-               qdisc_skb_cb(skb)->post_ct = false;
+               tc_skb_cb(skb)->post_ct = false;
                ct = nf_ct_get(skb, &ctinfo);
                if (ct) {
                        nf_conntrack_put(&ct->ct_general);
@@ -1048,7 +1048,8 @@ do_nat:
 out_push:
        skb_push_rcsum(skb, nh_ofs);
 
-       qdisc_skb_cb(skb)->post_ct = true;
+       tc_skb_cb(skb)->post_ct = true;
+       tc_skb_cb(skb)->zone = p->zone;
 out_clear:
        if (defrag)
                qdisc_skb_cb(skb)->pkt_len = skb->len;
index 2ef8f5a..35c74bd 100644 (file)
@@ -1617,12 +1617,15 @@ int tcf_classify(struct sk_buff *skb,
 
        /* If we missed on some chain */
        if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+               struct tc_skb_cb *cb = tc_skb_cb(skb);
+
                ext = tc_skb_ext_alloc(skb);
                if (WARN_ON_ONCE(!ext))
                        return TC_ACT_SHOT;
                ext->chain = last_executed_chain;
-               ext->mru = qdisc_skb_cb(skb)->mru;
-               ext->post_ct = qdisc_skb_cb(skb)->post_ct;
+               ext->mru = cb->mru;
+               ext->post_ct = cb->post_ct;
+               ext->zone = cb->zone;
        }
 
        return ret;
@@ -3687,6 +3690,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
                                entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
                                break;
                        default:
+                               err = -EOPNOTSUPP;
                                goto err_out_locked;
                        }
                } else if (is_tcf_skbedit_ptype(act)) {
index aab13ba..ef54ed3 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <net/sch_generic.h>
 #include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
 #include <net/ip.h>
 #include <net/flow_dissector.h>
 #include <net/geneve.h>
@@ -309,7 +310,8 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                       struct tcf_result *res)
 {
        struct cls_fl_head *head = rcu_dereference_bh(tp->root);
-       bool post_ct = qdisc_skb_cb(skb)->post_ct;
+       bool post_ct = tc_skb_cb(skb)->post_ct;
+       u16 zone = tc_skb_cb(skb)->zone;
        struct fl_flow_key skb_key;
        struct fl_flow_mask *mask;
        struct cls_fl_filter *f;
@@ -327,7 +329,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
                                    fl_ct_info_to_flower_map,
                                    ARRAY_SIZE(fl_ct_info_to_flower_map),
-                                   post_ct);
+                                   post_ct, zone);
                skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
                skb_flow_dissect(skb, &mask->dissector, &skb_key,
                                 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
index 3c2300d..857aaeb 100644 (file)
@@ -2736,7 +2736,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
        q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
                           GFP_KERNEL);
        if (!q->tins)
-               goto nomem;
+               return -ENOMEM;
 
        for (i = 0; i < CAKE_MAX_TINS; i++) {
                struct cake_tin_data *b = q->tins + i;
@@ -2766,10 +2766,6 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
        q->min_netlen = ~0;
        q->min_adjlen = ~0;
        return 0;
-
-nomem:
-       cake_destroy(sch);
-       return -ENOMEM;
 }
 
 static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
index e007fc7..d733934 100644 (file)
@@ -666,9 +666,9 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
                }
        }
        for (i = q->nbands; i < oldbands; i++) {
-               qdisc_tree_flush_backlog(q->classes[i].qdisc);
-               if (i >= q->nstrict)
+               if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
                        list_del(&q->classes[i].alist);
+               qdisc_tree_flush_backlog(q->classes[i].qdisc);
        }
        q->nstrict = nstrict;
        memcpy(q->prio2band, priomap, sizeof(priomap));
index 830f355..d6aba6e 100644 (file)
@@ -531,6 +531,7 @@ static void fq_pie_destroy(struct Qdisc *sch)
        struct fq_pie_sched_data *q = qdisc_priv(sch);
 
        tcf_block_put(q->block);
+       q->p_params.tupdate = 0;
        del_timer_sync(&q->adapt_timer);
        kvfree(q->flows);
 }
index 8c06381..5ded4c8 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 #include <net/netlink.h>
 #include <net/sch_generic.h>
+#include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/ip6_fib.h>
@@ -137,7 +138,7 @@ err:
 
 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
 {
-       u16 mru = qdisc_skb_cb(skb)->mru;
+       u16 mru = tc_skb_cb(skb)->mru;
        int err;
 
        if (mru && skb->len > mru + skb->dev->hard_header_len)
index 0b7f9ba..d4ce58c 100644 (file)
@@ -1421,10 +1421,8 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
        if (err < 0)
                return err;
 
-       if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
-               max_classes = QFQ_MAX_AGG_CLASSES;
-       else
-               max_classes = qdisc_dev(sch)->tx_queue_len + 1;
+       max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
+                           QFQ_MAX_AGG_CLASSES);
        /* max_cl_shift = floor(log_2(max_classes)) */
        max_cl_shift = __fls(max_classes);
        q->max_agg_classes = 1<<max_cl_shift;
index 760b367..034e2c7 100644 (file)
@@ -245,54 +245,49 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
                + 64;
 }
 
-static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
 {
        struct sctp_association *assoc = tsp->asoc;
-       struct sock *sk = tsp->asoc->base.sk;
        struct sctp_comm_param *commp = p;
-       struct sk_buff *in_skb = commp->skb;
+       struct sock *sk = ep->base.sk;
        const struct inet_diag_req_v2 *req = commp->r;
-       const struct nlmsghdr *nlh = commp->nlh;
-       struct net *net = sock_net(in_skb->sk);
+       struct sk_buff *skb = commp->skb;
        struct sk_buff *rep;
        int err;
 
        err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
        if (err)
-               goto out;
+               return err;
 
-       err = -ENOMEM;
        rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
        if (!rep)
-               goto out;
+               return -ENOMEM;
 
        lock_sock(sk);
-       if (sk != assoc->base.sk) {
-               release_sock(sk);
-               sk = assoc->base.sk;
-               lock_sock(sk);
+       if (ep != assoc->ep) {
+               err = -EAGAIN;
+               goto out;
        }
-       err = inet_sctp_diag_fill(sk, assoc, rep, req,
-                                 sk_user_ns(NETLINK_CB(in_skb).sk),
-                                 NETLINK_CB(in_skb).portid,
-                                 nlh->nlmsg_seq, 0, nlh,
-                                 commp->net_admin);
-       release_sock(sk);
+
+       err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk),
+                                 NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0,
+                                 commp->nlh, commp->net_admin);
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
-               kfree_skb(rep);
                goto out;
        }
+       release_sock(sk);
 
-       err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+       return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);
 
 out:
+       release_sock(sk);
+       kfree_skb(rep);
        return err;
 }
 
-static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
 {
-       struct sctp_endpoint *ep = tsp->asoc->ep;
        struct sctp_comm_param *commp = p;
        struct sock *sk = ep->base.sk;
        struct sk_buff *skb = commp->skb;
@@ -302,6 +297,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
        int err = 0;
 
        lock_sock(sk);
+       if (ep != tsp->asoc->ep)
+               goto release;
        list_for_each_entry(assoc, &ep->asocs, asocs) {
                if (cb->args[4] < cb->args[1])
                        goto next;
@@ -344,9 +341,8 @@ release:
        return err;
 }
 
-static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
+static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
 {
-       struct sctp_endpoint *ep = tsp->asoc->ep;
        struct sctp_comm_param *commp = p;
        struct sock *sk = ep->base.sk;
        const struct inet_diag_req_v2 *r = commp->r;
@@ -429,15 +425,15 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 static int sctp_diag_dump_one(struct netlink_callback *cb,
                              const struct inet_diag_req_v2 *req)
 {
-       struct sk_buff *in_skb = cb->skb;
-       struct net *net = sock_net(in_skb->sk);
+       struct sk_buff *skb = cb->skb;
+       struct net *net = sock_net(skb->sk);
        const struct nlmsghdr *nlh = cb->nlh;
        union sctp_addr laddr, paddr;
        struct sctp_comm_param commp = {
-               .skb = in_skb,
+               .skb = skb,
                .r = req,
                .nlh = nlh,
-               .net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
+               .net_admin = netlink_net_capable(skb, CAP_NET_ADMIN),
        };
 
        if (req->sdiag_family == AF_INET) {
@@ -460,7 +456,7 @@ static int sctp_diag_dump_one(struct netlink_callback *cb,
                paddr.v6.sin6_family = AF_INET6;
        }
 
-       return sctp_transport_lookup_process(sctp_tsp_dump_one,
+       return sctp_transport_lookup_process(sctp_sock_dump_one,
                                             net, &laddr, &paddr, &commp);
 }
 
@@ -505,8 +501,8 @@ skip:
        if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
                goto done;
 
-       sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
-                               net, &pos, &commp);
+       sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
+                                       net, &pos, &commp);
        cb->args[2] = pos;
 
 done:
index 48c9c2c..efffde7 100644 (file)
@@ -184,6 +184,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 }
 
 /* Final destructor for endpoint.  */
+static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
+{
+       struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
+       struct sock *sk = ep->base.sk;
+
+       sctp_sk(sk)->ep = NULL;
+       sock_put(sk);
+
+       kfree(ep);
+       SCTP_DBG_OBJCNT_DEC(ep);
+}
+
 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 {
        struct sock *sk;
@@ -213,18 +225,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
        if (sctp_sk(sk)->bind_hash)
                sctp_put_port(sk);
 
-       sctp_sk(sk)->ep = NULL;
-       /* Give up our hold on the sock */
-       sock_put(sk);
-
-       kfree(ep);
-       SCTP_DBG_OBJCNT_DEC(ep);
+       call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
 }
 
 /* Hold a reference to an endpoint. */
-void sctp_endpoint_hold(struct sctp_endpoint *ep)
+int sctp_endpoint_hold(struct sctp_endpoint *ep)
 {
-       refcount_inc(&ep->base.refcnt);
+       return refcount_inc_not_zero(&ep->base.refcnt);
 }
 
 /* Release a reference to an endpoint and clean up if there are
index 3339125..da08671 100644 (file)
@@ -5317,32 +5317,41 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
 }
 EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
 
-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
-                                 struct net *net,
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
                                  const union sctp_addr *laddr,
                                  const union sctp_addr *paddr, void *p)
 {
        struct sctp_transport *transport;
-       int err;
+       struct sctp_endpoint *ep;
+       int err = -ENOENT;
 
        rcu_read_lock();
        transport = sctp_addrs_lookup_transport(net, laddr, paddr);
+       if (!transport) {
+               rcu_read_unlock();
+               return err;
+       }
+       ep = transport->asoc->ep;
+       if (!sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+               sctp_transport_put(transport);
+               rcu_read_unlock();
+               return err;
+       }
        rcu_read_unlock();
-       if (!transport)
-               return -ENOENT;
 
-       err = cb(transport, p);
+       err = cb(ep, transport, p);
+       sctp_endpoint_put(ep);
        sctp_transport_put(transport);
-
        return err;
 }
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
 
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-                           int (*cb_done)(struct sctp_transport *, void *),
-                           struct net *net, int *pos, void *p) {
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+                                   struct net *net, int *pos, void *p)
+{
        struct rhashtable_iter hti;
        struct sctp_transport *tsp;
+       struct sctp_endpoint *ep;
        int ret;
 
 again:
@@ -5351,26 +5360,32 @@ again:
 
        tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
        for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
-               ret = cb(tsp, p);
-               if (ret)
-                       break;
+               ep = tsp->asoc->ep;
+               if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+                       ret = cb(ep, tsp, p);
+                       if (ret)
+                               break;
+                       sctp_endpoint_put(ep);
+               }
                (*pos)++;
                sctp_transport_put(tsp);
        }
        sctp_transport_walk_stop(&hti);
 
        if (ret) {
-               if (cb_done && !cb_done(tsp, p)) {
+               if (cb_done && !cb_done(ep, tsp, p)) {
                        (*pos)++;
+                       sctp_endpoint_put(ep);
                        sctp_transport_put(tsp);
                        goto again;
                }
+               sctp_endpoint_put(ep);
                sctp_transport_put(tsp);
        }
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(sctp_for_each_transport);
+EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
 
 /* 7.2.1 Association Status (SCTP_STATUS)
 
index 230072f..1c9289f 100644 (file)
@@ -194,7 +194,9 @@ static int smc_release(struct socket *sock)
        /* cleanup for a dangling non-blocking connect */
        if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
                tcp_abort(smc->clcsock->sk, ECONNABORTED);
-       flush_work(&smc->connect_work);
+
+       if (cancel_work_sync(&smc->connect_work))
+               sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */
 
        if (sk->sk_state == SMC_LISTEN)
                /* smc_close_non_accepted() is called and acquires
index f4286ca..1a4fc1c 100644 (file)
@@ -180,6 +180,11 @@ struct smc_connection {
        u16                     tx_cdc_seq;     /* sequence # for CDC send */
        u16                     tx_cdc_seq_fin; /* sequence # - tx completed */
        spinlock_t              send_lock;      /* protect wr_sends */
+       atomic_t                cdc_pend_tx_wr; /* number of pending tx CDC wqe
+                                                * - inc when post wqe,
+                                                * - dec on polled tx cqe
+                                                */
+       wait_queue_head_t       cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr*/
        struct delayed_work     tx_work;        /* retry of smc_cdc_msg_send */
        u32                     tx_off;         /* base offset in peer rmb */
 
index 99acd33..84c8a43 100644 (file)
@@ -31,10 +31,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
        struct smc_sock *smc;
        int diff;
 
-       if (!conn)
-               /* already dismissed */
-               return;
-
        smc = container_of(conn, struct smc_sock, conn);
        bh_lock_sock(&smc->sk);
        if (!wc_status) {
@@ -51,6 +47,12 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
                              conn);
                conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
        }
+
+       if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
+           unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
+               wake_up(&conn->cdc_pend_tx_wq);
+       WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);
+
        smc_tx_sndbuf_nonfull(smc);
        bh_unlock_sock(&smc->sk);
 }
@@ -107,6 +109,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
        conn->tx_cdc_seq++;
        conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
        smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
+
+       atomic_inc(&conn->cdc_pend_tx_wr);
+       smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
+
        rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
        if (!rc) {
                smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
@@ -114,6 +120,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
        } else {
                conn->tx_cdc_seq--;
                conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
+               atomic_dec(&conn->cdc_pend_tx_wr);
        }
 
        return rc;
@@ -136,7 +143,18 @@ int smcr_cdc_msg_send_validation(struct smc_connection *conn,
        peer->token = htonl(local->token);
        peer->prod_flags.failover_validation = 1;
 
+       /* We need to set pend->conn here to make sure smc_cdc_tx_handler()
+        * can handle properly
+        */
+       smc_cdc_add_pending_send(conn, pend);
+
+       atomic_inc(&conn->cdc_pend_tx_wr);
+       smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
+
        rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
+       if (unlikely(rc))
+               atomic_dec(&conn->cdc_pend_tx_wr);
+
        return rc;
 }
 
@@ -193,31 +211,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
        return rc;
 }
 
-static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
-                             unsigned long data)
+void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
 {
-       struct smc_connection *conn = (struct smc_connection *)data;
-       struct smc_cdc_tx_pend *cdc_pend =
-               (struct smc_cdc_tx_pend *)tx_pend;
-
-       return cdc_pend->conn == conn;
-}
-
-static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
-{
-       struct smc_cdc_tx_pend *cdc_pend =
-               (struct smc_cdc_tx_pend *)tx_pend;
-
-       cdc_pend->conn = NULL;
-}
-
-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
-{
-       struct smc_link *link = conn->lnk;
-
-       smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
-                               smc_cdc_tx_filter, smc_cdc_tx_dismisser,
-                               (unsigned long)conn);
+       wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
 }
 
 /* Send a SMC-D CDC header.
index 0a0a89a..696cc11 100644 (file)
@@ -291,7 +291,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
                          struct smc_wr_buf **wr_buf,
                          struct smc_rdma_wr **wr_rdma_buf,
                          struct smc_cdc_tx_pend **pend);
-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
+void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn);
 int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
                     struct smc_cdc_tx_pend *pend);
 int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
index 387d28b..a684936 100644 (file)
@@ -647,7 +647,7 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                struct smc_link *lnk = &lgr->lnk[i];
 
-               if (smc_link_usable(lnk))
+               if (smc_link_sendable(lnk))
                        lnk->state = SMC_LNK_INACTIVE;
        }
        wake_up_all(&lgr->llc_msg_waiter);
@@ -1127,7 +1127,7 @@ void smc_conn_free(struct smc_connection *conn)
                        smc_ism_unset_conn(conn);
                tasklet_kill(&conn->rx_tsklet);
        } else {
-               smc_cdc_tx_dismiss_slots(conn);
+               smc_cdc_wait_pend_tx_wr(conn);
                if (current_work() != &conn->abort_work)
                        cancel_work_sync(&conn->abort_work);
        }
@@ -1204,7 +1204,7 @@ void smcr_link_clear(struct smc_link *lnk, bool log)
        smc_llc_link_clear(lnk, log);
        smcr_buf_unmap_lgr(lnk);
        smcr_rtoken_clear_link(lnk);
-       smc_ib_modify_qp_reset(lnk);
+       smc_ib_modify_qp_error(lnk);
        smc_wr_free_link(lnk);
        smc_ib_destroy_queue_pair(lnk);
        smc_ib_dealloc_protection_domain(lnk);
@@ -1336,7 +1336,7 @@ static void smc_conn_kill(struct smc_connection *conn, bool soft)
                else
                        tasklet_unlock_wait(&conn->rx_tsklet);
        } else {
-               smc_cdc_tx_dismiss_slots(conn);
+               smc_cdc_wait_pend_tx_wr(conn);
        }
        smc_lgr_unregister_conn(conn);
        smc_close_active_abort(smc);
@@ -1459,11 +1459,16 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd)
 /* Called when an SMCR device is removed or the smc module is unloaded.
  * If smcibdev is given, all SMCR link groups using this device are terminated.
  * If smcibdev is NULL, all SMCR link groups are terminated.
+ *
+ * We must wait here for QPs been destroyed before we destroy the CQs,
+ * or we won't received any CQEs and cdc_pend_tx_wr cannot reach 0 thus
+ * smc_sock cannot be released.
  */
 void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
 {
        struct smc_link_group *lgr, *lg;
        LIST_HEAD(lgr_free_list);
+       LIST_HEAD(lgr_linkdown_list);
        int i;
 
        spin_lock_bh(&smc_lgr_list.lock);
@@ -1475,7 +1480,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
                list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
                        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                                if (lgr->lnk[i].smcibdev == smcibdev)
-                                       smcr_link_down_cond_sched(&lgr->lnk[i]);
+                                       list_move_tail(&lgr->list, &lgr_linkdown_list);
                        }
                }
        }
@@ -1487,6 +1492,16 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
                __smc_lgr_terminate(lgr, false);
        }
 
+       list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) {
+               for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+                       if (lgr->lnk[i].smcibdev == smcibdev) {
+                               mutex_lock(&lgr->llc_conf_mutex);
+                               smcr_link_down_cond(&lgr->lnk[i]);
+                               mutex_unlock(&lgr->llc_conf_mutex);
+                       }
+               }
+       }
+
        if (smcibdev) {
                if (atomic_read(&smcibdev->lnk_cnt))
                        wait_event(smcibdev->lnks_deleted,
@@ -1586,7 +1601,6 @@ static void smcr_link_down(struct smc_link *lnk)
        if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
                return;
 
-       smc_ib_modify_qp_reset(lnk);
        to_lnk = smc_switch_conns(lgr, lnk, true);
        if (!to_lnk) { /* no backup link available */
                smcr_link_clear(lnk, true);
@@ -1824,6 +1838,7 @@ create:
        conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
        conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
        conn->urg_state = SMC_URG_READ;
+       init_waitqueue_head(&conn->cdc_pend_tx_wq);
        INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
        if (ini->is_smcd) {
                conn->rx_off = sizeof(struct smcd_cdc_msg);
index 59cef3b..d63b082 100644 (file)
@@ -415,6 +415,12 @@ static inline bool smc_link_usable(struct smc_link *lnk)
        return true;
 }
 
+static inline bool smc_link_sendable(struct smc_link *lnk)
+{
+       return smc_link_usable(lnk) &&
+               lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
+}
+
 static inline bool smc_link_active(struct smc_link *lnk)
 {
        return lnk->state == SMC_LNK_ACTIVE;
index d93055e..fe5d539 100644 (file)
@@ -109,12 +109,12 @@ int smc_ib_modify_qp_rts(struct smc_link *lnk)
                            IB_QP_MAX_QP_RD_ATOMIC);
 }
 
-int smc_ib_modify_qp_reset(struct smc_link *lnk)
+int smc_ib_modify_qp_error(struct smc_link *lnk)
 {
        struct ib_qp_attr qp_attr;
 
        memset(&qp_attr, 0, sizeof(qp_attr));
-       qp_attr.qp_state = IB_QPS_RESET;
+       qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
 }
 
index 0758593..bfa1c6b 100644 (file)
@@ -90,6 +90,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk);
 int smc_ib_ready_link(struct smc_link *lnk);
 int smc_ib_modify_qp_rts(struct smc_link *lnk);
 int smc_ib_modify_qp_reset(struct smc_link *lnk);
+int smc_ib_modify_qp_error(struct smc_link *lnk);
 long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
 int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
                             struct smc_buf_desc *buf_slot, u8 link_idx);
index b102680..3e9fd8a 100644 (file)
@@ -1630,7 +1630,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
        delllc.reason = htonl(rsn);
 
        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
-               if (!smc_link_usable(&lgr->lnk[i]))
+               if (!smc_link_sendable(&lgr->lnk[i]))
                        continue;
                if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
                        break;
index 600ab58..c6cfdea 100644 (file)
@@ -62,13 +62,9 @@ static inline bool smc_wr_is_tx_pend(struct smc_link *link)
 }
 
 /* wait till all pending tx work requests on the given link are completed */
-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
+void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
 {
-       if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
-                              SMC_WR_TX_WAIT_PENDING_TIME))
-               return 0;
-       else /* timeout */
-               return -EPIPE;
+       wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
 }
 
 static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
@@ -87,7 +83,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
        struct smc_wr_tx_pend pnd_snd;
        struct smc_link *link;
        u32 pnd_snd_idx;
-       int i;
 
        link = wc->qp->qp_context;
 
@@ -128,14 +123,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
        }
 
        if (wc->status) {
-               for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
-                       /* clear full struct smc_wr_tx_pend including .priv */
-                       memset(&link->wr_tx_pends[i], 0,
-                              sizeof(link->wr_tx_pends[i]));
-                       memset(&link->wr_tx_bufs[i], 0,
-                              sizeof(link->wr_tx_bufs[i]));
-                       clear_bit(i, link->wr_tx_mask);
-               }
                if (link->lgr->smc_version == SMC_V2) {
                        memset(link->wr_tx_v2_pend, 0,
                               sizeof(*link->wr_tx_v2_pend));
@@ -188,7 +175,7 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
 static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
 {
        *idx = link->wr_tx_cnt;
-       if (!smc_link_usable(link))
+       if (!smc_link_sendable(link))
                return -ENOLINK;
        for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
                if (!test_and_set_bit(*idx, link->wr_tx_mask))
@@ -231,7 +218,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
        } else {
                rc = wait_event_interruptible_timeout(
                        link->wr_tx_wait,
-                       !smc_link_usable(link) ||
+                       !smc_link_sendable(link) ||
                        lgr->terminating ||
                        (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
                        SMC_WR_TX_WAIT_FREE_SLOT_TIME);
@@ -358,18 +345,20 @@ int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
                        unsigned long timeout)
 {
        struct smc_wr_tx_pend *pend;
+       u32 pnd_idx;
        int rc;
 
        pend = container_of(priv, struct smc_wr_tx_pend, priv);
        pend->compl_requested = 1;
-       init_completion(&link->wr_tx_compl[pend->idx]);
+       pnd_idx = pend->idx;
+       init_completion(&link->wr_tx_compl[pnd_idx]);
 
        rc = smc_wr_tx_send(link, priv);
        if (rc)
                return rc;
        /* wait for completion by smc_wr_tx_process_cqe() */
        rc = wait_for_completion_interruptible_timeout(
-                                       &link->wr_tx_compl[pend->idx], timeout);
+                                       &link->wr_tx_compl[pnd_idx], timeout);
        if (rc <= 0)
                rc = -ENODATA;
        if (rc > 0)
@@ -419,25 +408,6 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
        return rc;
 }
 
-void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
-                            smc_wr_tx_filter filter,
-                            smc_wr_tx_dismisser dismisser,
-                            unsigned long data)
-{
-       struct smc_wr_tx_pend_priv *tx_pend;
-       struct smc_wr_rx_hdr *wr_tx;
-       int i;
-
-       for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
-               wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
-               if (wr_tx->type != wr_tx_hdr_type)
-                       continue;
-               tx_pend = &link->wr_tx_pends[i].priv;
-               if (filter(tx_pend, data))
-                       dismisser(tx_pend);
-       }
-}
-
 /****************************** receive queue ********************************/
 
 int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
@@ -673,10 +643,7 @@ void smc_wr_free_link(struct smc_link *lnk)
        smc_wr_wakeup_reg_wait(lnk);
        smc_wr_wakeup_tx_wait(lnk);
 
-       if (smc_wr_tx_wait_no_pending_sends(lnk))
-               memset(lnk->wr_tx_mask, 0,
-                      BITS_TO_LONGS(SMC_WR_BUF_CNT) *
-                                               sizeof(*lnk->wr_tx_mask));
+       smc_wr_tx_wait_no_pending_sends(lnk);
        wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
        wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
 
index f353311..47512cc 100644 (file)
@@ -22,7 +22,6 @@
 #define SMC_WR_BUF_CNT 16      /* # of ctrl buffers per link */
 
 #define SMC_WR_TX_WAIT_FREE_SLOT_TIME  (10 * HZ)
-#define SMC_WR_TX_WAIT_PENDING_TIME    (5 * HZ)
 
 #define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */
 
@@ -62,7 +61,7 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
 
 static inline bool smc_wr_tx_link_hold(struct smc_link *link)
 {
-       if (!smc_link_usable(link))
+       if (!smc_link_sendable(link))
                return false;
        atomic_inc(&link->wr_tx_refcnt);
        return true;
@@ -130,7 +129,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
                             smc_wr_tx_filter filter,
                             smc_wr_tx_dismisser dismisser,
                             unsigned long data);
-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
+void smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
 
 int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
 int smc_wr_rx_post_init(struct smc_link *link);
index b4d9419..d293614 100644 (file)
@@ -524,7 +524,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
                return -EEXIST;
 
        /* Allocate a new AEAD */
-       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
        if (unlikely(!tmp))
                return -ENOMEM;
 
@@ -1474,7 +1474,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
                return -EEXIST;
 
        /* Allocate crypto */
-       c = kzalloc(sizeof(*c), GFP_KERNEL);
+       c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c)
                return -ENOMEM;
 
@@ -1488,7 +1488,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
        }
 
        /* Allocate statistic structure */
-       c->stats = alloc_percpu(struct tipc_crypto_stats);
+       c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
        if (!c->stats) {
                if (c->wq)
                        destroy_workqueue(c->wq);
@@ -2461,7 +2461,7 @@ static void tipc_crypto_work_tx(struct work_struct *work)
        }
 
        /* Lets duplicate it first */
-       skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_KERNEL);
+       skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
        rcu_read_unlock();
 
        /* Now, generate new key, initiate & distribute it */
index ad570c2..3e63c83 100644 (file)
@@ -1461,6 +1461,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
                msg_set_syn(hdr, 1);
        }
 
+       memset(&skaddr, 0, sizeof(skaddr));
+
        /* Determine destination */
        if (atype == TIPC_SERVICE_RANGE) {
                return tipc_sendmcast(sock, ua, m, dlen, timeout);
index 59ee1be..ec2c2af 100644 (file)
@@ -1299,7 +1299,8 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
        space_available = virtio_transport_space_update(sk, pkt);
 
        /* Update CID in case it has changed after a transport reset event */
-       vsk->local_addr.svm_cid = dst.svm_cid;
+       if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
+               vsk->local_addr.svm_cid = dst.svm_cid;
 
        if (space_available)
                sk->sk_write_space(sk);
index df87c7f..f8f01a3 100644 (file)
@@ -133,6 +133,7 @@ static u32 reg_is_indoor_portid;
 
 static void restore_regulatory_settings(bool reset_user, bool cached);
 static void print_regdomain(const struct ieee80211_regdomain *rd);
+static void reg_process_hint(struct regulatory_request *reg_request);
 
 static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
 {
@@ -1098,6 +1099,8 @@ int reg_reload_regdb(void)
        const struct firmware *fw;
        void *db;
        int err;
+       const struct ieee80211_regdomain *current_regdomain;
+       struct regulatory_request *request;
 
        err = request_firmware(&fw, "regulatory.db", &reg_pdev->dev);
        if (err)
@@ -1118,8 +1121,26 @@ int reg_reload_regdb(void)
        if (!IS_ERR_OR_NULL(regdb))
                kfree(regdb);
        regdb = db;
-       rtnl_unlock();
 
+       /* reset regulatory domain */
+       current_regdomain = get_cfg80211_regdom();
+
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (!request) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+
+       request->wiphy_idx = WIPHY_IDX_INVALID;
+       request->alpha2[0] = current_regdomain->alpha2[0];
+       request->alpha2[1] = current_regdomain->alpha2[1];
+       request->initiator = NL80211_REGDOM_SET_BY_CORE;
+       request->user_reg_hint_type = NL80211_USER_REG_HINT_USER;
+
+       reg_process_hint(request);
+
+out_unlock:
+       rtnl_unlock();
  out:
        release_firmware(fw);
        return err;
@@ -2338,6 +2359,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
        struct cfg80211_chan_def chandef = {};
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        enum nl80211_iftype iftype;
+       bool ret;
 
        wdev_lock(wdev);
        iftype = wdev->iftype;
@@ -2387,7 +2409,11 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
        case NL80211_IFTYPE_ADHOC:
-               return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
+               wiphy_lock(wiphy);
+               ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
+               wiphy_unlock(wiphy);
+
+               return ret;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
                return cfg80211_chandef_usable(wiphy, &chandef,
index bc4ad48..fd39bb6 100644 (file)
@@ -83,6 +83,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                xskb = &pool->heads[i];
                xskb->pool = pool;
                xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
+               INIT_LIST_HEAD(&xskb->free_list_node);
                if (pool->unaligned)
                        pool->free_heads[i] = xskb;
                else
index b9198e2..faf8cdb 100644 (file)
@@ -4,6 +4,7 @@ obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi-modify.o
 
 CFLAGS_sample-trace-array.o := -I$(src)
 obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o
index 690e4a9..2877cb0 100644 (file)
@@ -4,6 +4,9 @@
 #include <linux/ftrace.h>
 #include <asm/asm-offsets.h>
 
+extern void my_direct_func1(void);
+extern void my_direct_func2(void);
+
 void my_direct_func1(void)
 {
        trace_printk("my direct func1\n");
diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c
new file mode 100644 (file)
index 0000000..6f43a39
--- /dev/null
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/ftrace.h>
+#include <asm/asm-offsets.h>
+
+extern void my_direct_func1(unsigned long ip);
+extern void my_direct_func2(unsigned long ip);
+
+void my_direct_func1(unsigned long ip)
+{
+       trace_printk("my direct func1 ip %lx\n", ip);
+}
+
+void my_direct_func2(unsigned long ip)
+{
+       trace_printk("my direct func2 ip %lx\n", ip);
+}
+
+extern void my_tramp1(void *);
+extern void my_tramp2(void *);
+
+#ifdef CONFIG_X86_64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp1, @function\n"
+"      .globl          my_tramp1\n"
+"   my_tramp1:"
+"      pushq %rbp\n"
+"      movq %rsp, %rbp\n"
+"      pushq %rdi\n"
+"      movq 8(%rbp), %rdi\n"
+"      call my_direct_func1\n"
+"      popq %rdi\n"
+"      leave\n"
+"      ret\n"
+"      .size           my_tramp1, .-my_tramp1\n"
+"      .type           my_tramp2, @function\n"
+"\n"
+"      .globl          my_tramp2\n"
+"   my_tramp2:"
+"      pushq %rbp\n"
+"      movq %rsp, %rbp\n"
+"      pushq %rdi\n"
+"      movq 8(%rbp), %rdi\n"
+"      call my_direct_func2\n"
+"      popq %rdi\n"
+"      leave\n"
+"      ret\n"
+"      .size           my_tramp2, .-my_tramp2\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+"       .pushsection    .text, \"ax\", @progbits\n"
+"       .type           my_tramp1, @function\n"
+"       .globl          my_tramp1\n"
+"   my_tramp1:"
+"       lgr             %r1,%r15\n"
+"       stmg            %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       stg             %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       aghi            %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"       stg             %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"       lgr             %r2,%r0\n"
+"       brasl           %r14,my_direct_func1\n"
+"       aghi            %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"       lmg             %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       lg              %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       lgr             %r1,%r0\n"
+"       br              %r1\n"
+"       .size           my_tramp1, .-my_tramp1\n"
+"\n"
+"       .type           my_tramp2, @function\n"
+"       .globl          my_tramp2\n"
+"   my_tramp2:"
+"       lgr             %r1,%r15\n"
+"       stmg            %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       stg             %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       aghi            %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"       stg             %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"       lgr             %r2,%r0\n"
+"       brasl           %r14,my_direct_func2\n"
+"       aghi            %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"       lmg             %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       lg              %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       lgr             %r1,%r0\n"
+"       br              %r1\n"
+"       .size           my_tramp2, .-my_tramp2\n"
+"       .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
+static unsigned long my_tramp = (unsigned long)my_tramp1;
+static unsigned long tramps[2] = {
+       (unsigned long)my_tramp1,
+       (unsigned long)my_tramp2,
+};
+
+static struct ftrace_ops direct;
+
+static int simple_thread(void *arg)
+{
+       static int t;
+       int ret = 0;
+
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(2 * HZ);
+
+               if (ret)
+                       continue;
+               t ^= 1;
+               ret = modify_ftrace_direct_multi(&direct, tramps[t]);
+               if (!ret)
+                       my_tramp = tramps[t];
+               WARN_ON_ONCE(ret);
+       }
+
+       return 0;
+}
+
+static struct task_struct *simple_tsk;
+
+static int __init ftrace_direct_multi_init(void)
+{
+       int ret;
+
+       ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
+       ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
+
+       ret = register_ftrace_direct_multi(&direct, my_tramp);
+
+       if (!ret)
+               simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
+       return ret;
+}
+
+static void __exit ftrace_direct_multi_exit(void)
+{
+       kthread_stop(simple_tsk);
+       unregister_ftrace_direct_multi(&direct, my_tramp);
+}
+
+module_init(ftrace_direct_multi_init);
+module_exit(ftrace_direct_multi_exit);
+
+MODULE_AUTHOR("Jiri Olsa");
+MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct_multi()");
+MODULE_LICENSE("GPL");
index 6e0de72..b97e5ed 100644 (file)
@@ -5,6 +5,9 @@
 #include <linux/ftrace.h>
 #include <asm/asm-offsets.h>
 
+extern void my_direct_func(struct vm_area_struct *vma,
+                          unsigned long address, unsigned int flags);
+
 void my_direct_func(struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags)
 {
index a30aa42..c918b13 100644 (file)
@@ -5,6 +5,8 @@
 #include <linux/ftrace.h>
 #include <asm/asm-offsets.h>
 
+extern void my_direct_func(struct task_struct *p);
+
 void my_direct_func(struct task_struct *p)
 {
        trace_printk("waking up %s-%d\n", p->comm, p->pid);
index 7d631aa..3ccb2c7 100755 (executable)
@@ -219,7 +219,7 @@ if ($arch eq "x86_64") {
 
 } elsif ($arch eq "s390" && $bits == 64) {
     if ($cc =~ /-DCC_USING_HOTPATCH/) {
-       $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$";
+       $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(brcl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
        $mcount_adjust = 0;
     }
     $alignment = 8;
index 62d30c0..dde4ecc 100644 (file)
@@ -611,10 +611,11 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag,
        return 0;
 }
 
-static int parse_sid(struct super_block *sb, const char *s, u32 *sid)
+static int parse_sid(struct super_block *sb, const char *s, u32 *sid,
+                    gfp_t gfp)
 {
        int rc = security_context_str_to_sid(&selinux_state, s,
-                                            sid, GFP_KERNEL);
+                                            sid, gfp);
        if (rc)
                pr_warn("SELinux: security_context_str_to_sid"
                       "(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -685,7 +686,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
         */
        if (opts) {
                if (opts->fscontext) {
-                       rc = parse_sid(sb, opts->fscontext, &fscontext_sid);
+                       rc = parse_sid(sb, opts->fscontext, &fscontext_sid,
+                                       GFP_KERNEL);
                        if (rc)
                                goto out;
                        if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
@@ -694,7 +696,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
                        sbsec->flags |= FSCONTEXT_MNT;
                }
                if (opts->context) {
-                       rc = parse_sid(sb, opts->context, &context_sid);
+                       rc = parse_sid(sb, opts->context, &context_sid,
+                                       GFP_KERNEL);
                        if (rc)
                                goto out;
                        if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
@@ -703,7 +706,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
                        sbsec->flags |= CONTEXT_MNT;
                }
                if (opts->rootcontext) {
-                       rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid);
+                       rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid,
+                                       GFP_KERNEL);
                        if (rc)
                                goto out;
                        if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
@@ -712,7 +716,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
                        sbsec->flags |= ROOTCONTEXT_MNT;
                }
                if (opts->defcontext) {
-                       rc = parse_sid(sb, opts->defcontext, &defcontext_sid);
+                       rc = parse_sid(sb, opts->defcontext, &defcontext_sid,
+                                       GFP_KERNEL);
                        if (rc)
                                goto out;
                        if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
@@ -2702,14 +2707,14 @@ static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
                return (sbsec->flags & SE_MNTMASK) ? 1 : 0;
 
        if (opts->fscontext) {
-               rc = parse_sid(sb, opts->fscontext, &sid);
+               rc = parse_sid(sb, opts->fscontext, &sid, GFP_NOWAIT);
                if (rc)
                        return 1;
                if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
                        return 1;
        }
        if (opts->context) {
-               rc = parse_sid(sb, opts->context, &sid);
+               rc = parse_sid(sb, opts->context, &sid, GFP_NOWAIT);
                if (rc)
                        return 1;
                if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
@@ -2719,14 +2724,14 @@ static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
                struct inode_security_struct *root_isec;
 
                root_isec = backing_inode_security(sb->s_root);
-               rc = parse_sid(sb, opts->rootcontext, &sid);
+               rc = parse_sid(sb, opts->rootcontext, &sid, GFP_NOWAIT);
                if (rc)
                        return 1;
                if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
                        return 1;
        }
        if (opts->defcontext) {
-               rc = parse_sid(sb, opts->defcontext, &sid);
+               rc = parse_sid(sb, opts->defcontext, &sid, GFP_NOWAIT);
                if (rc)
                        return 1;
                if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
@@ -2749,14 +2754,14 @@ static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
                return 0;
 
        if (opts->fscontext) {
-               rc = parse_sid(sb, opts->fscontext, &sid);
+               rc = parse_sid(sb, opts->fscontext, &sid, GFP_KERNEL);
                if (rc)
                        return rc;
                if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
                        goto out_bad_option;
        }
        if (opts->context) {
-               rc = parse_sid(sb, opts->context, &sid);
+               rc = parse_sid(sb, opts->context, &sid, GFP_KERNEL);
                if (rc)
                        return rc;
                if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
@@ -2765,14 +2770,14 @@ static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
        if (opts->rootcontext) {
                struct inode_security_struct *root_isec;
                root_isec = backing_inode_security(sb->s_root);
-               rc = parse_sid(sb, opts->rootcontext, &sid);
+               rc = parse_sid(sb, opts->rootcontext, &sid, GFP_KERNEL);
                if (rc)
                        return rc;
                if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
                        goto out_bad_option;
        }
        if (opts->defcontext) {
-               rc = parse_sid(sb, opts->defcontext, &sid);
+               rc = parse_sid(sb, opts->defcontext, &sid, GFP_KERNEL);
                if (rc)
                        return rc;
                if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
@@ -5780,7 +5785,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
        struct sk_security_struct *sksec;
        struct common_audit_data ad;
        struct lsm_network_audit net = {0,};
-       u8 proto;
+       u8 proto = 0;
 
        sk = skb_to_full_sk(skb);
        if (sk == NULL)
index 1da2e37..6799b11 100644 (file)
@@ -1051,10 +1051,11 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
                return false;
        if (!domain)
                return true;
+       if (READ_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED]))
+               return false;
        list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
                                srcu_read_lock_held(&tomoyo_ss)) {
                u16 perm;
-               u8 i;
 
                if (ptr->is_deleted)
                        continue;
@@ -1065,23 +1066,23 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
                 */
                switch (ptr->type) {
                case TOMOYO_TYPE_PATH_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_path_acl, head)->perm);
+                       perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);
                        break;
                case TOMOYO_TYPE_PATH2_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_path2_acl, head)->perm);
+                       perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);
                        break;
                case TOMOYO_TYPE_PATH_NUMBER_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_path_number_acl, head)
+                       perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)
                                  ->perm);
                        break;
                case TOMOYO_TYPE_MKDEV_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
+                       perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
                        break;
                case TOMOYO_TYPE_INET_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_inet_acl, head)->perm);
+                       perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);
                        break;
                case TOMOYO_TYPE_UNIX_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_unix_acl, head)->perm);
+                       perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);
                        break;
                case TOMOYO_TYPE_MANUAL_TASK_ACL:
                        perm = 0;
@@ -1089,21 +1090,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
                default:
                        perm = 1;
                }
-               for (i = 0; i < 16; i++)
-                       if (perm & (1 << i))
-                               count++;
+               count += hweight16(perm);
        }
        if (count < tomoyo_profile(domain->ns, domain->profile)->
            pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
                return true;
-       if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) {
-               domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true;
-               /* r->granted = false; */
-               tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
+       WRITE_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED], true);
+       /* r->granted = false; */
+       tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
 #ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
-               pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
-                       domain->domainname->name);
+       pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
+               domain->domainname->name);
 #endif
-       }
        return false;
 }
index 470dabc..edff063 100644 (file)
@@ -264,6 +264,7 @@ static int copy_ctl_value_to_user(void __user *userdata,
                                  struct snd_ctl_elem_value *data,
                                  int type, int count)
 {
+       struct snd_ctl_elem_value32 __user *data32 = userdata;
        int i, size;
 
        if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
@@ -280,6 +281,8 @@ static int copy_ctl_value_to_user(void __user *userdata,
                if (copy_to_user(valuep, data->value.bytes.data, size))
                        return -EFAULT;
        }
+       if (copy_to_user(&data32->id, &data->id, sizeof(data32->id)))
+               return -EFAULT;
        return 0;
 }
 
index 32350c6..537df1e 100644 (file)
@@ -509,6 +509,10 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
                return -ENOMEM;
 
        jack->id = kstrdup(id, GFP_KERNEL);
+       if (jack->id == NULL) {
+               kfree(jack);
+               return -ENOMEM;
+       }
 
        /* don't creat input device for phantom jack */
        if (!phantom_jack) {
index 82a8187..20a0a47 100644 (file)
@@ -147,7 +147,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params,
  *
  * Return the maximum value for field PAR.
  */
-static unsigned int
+static int
 snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params,
                           snd_pcm_hw_param_t var, int *dir)
 {
@@ -682,18 +682,24 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
                                   struct snd_pcm_hw_params *oss_params,
                                   struct snd_pcm_hw_params *slave_params)
 {
-       size_t s;
-       size_t oss_buffer_size, oss_period_size, oss_periods;
-       size_t min_period_size, max_period_size;
+       ssize_t s;
+       ssize_t oss_buffer_size;
+       ssize_t oss_period_size, oss_periods;
+       ssize_t min_period_size, max_period_size;
        struct snd_pcm_runtime *runtime = substream->runtime;
        size_t oss_frame_size;
 
        oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) *
                         params_channels(oss_params) / 8;
 
+       oss_buffer_size = snd_pcm_hw_param_value_max(slave_params,
+                                                    SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+                                                    NULL);
+       if (oss_buffer_size <= 0)
+               return -EINVAL;
        oss_buffer_size = snd_pcm_plug_client_size(substream,
-                                                  snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
-       if (!oss_buffer_size)
+                                                  oss_buffer_size * oss_frame_size);
+       if (oss_buffer_size <= 0)
                return -EINVAL;
        oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
        if (atomic_read(&substream->mmap_count)) {
@@ -730,7 +736,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 
        min_period_size = snd_pcm_plug_client_size(substream,
                                                   snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
-       if (min_period_size) {
+       if (min_period_size > 0) {
                min_period_size *= oss_frame_size;
                min_period_size = roundup_pow_of_two(min_period_size);
                if (oss_period_size < min_period_size)
@@ -739,7 +745,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 
        max_period_size = snd_pcm_plug_client_size(substream,
                                                   snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
-       if (max_period_size) {
+       if (max_period_size > 0) {
                max_period_size *= oss_frame_size;
                max_period_size = rounddown_pow_of_two(max_period_size);
                if (oss_period_size > max_period_size)
@@ -752,7 +758,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
                oss_periods = substream->oss.setup.periods;
 
        s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
-       if (runtime->oss.maxfrags && s > runtime->oss.maxfrags)
+       if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags)
                s = runtime->oss.maxfrags;
        if (oss_periods > s)
                oss_periods = s;
@@ -878,8 +884,15 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
                err = -EINVAL;
                goto failure;
        }
-       choose_rate(substream, sparams, runtime->oss.rate);
-       snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL);
+
+       err = choose_rate(substream, sparams, runtime->oss.rate);
+       if (err < 0)
+               goto failure;
+       err = snd_pcm_hw_param_near(substream, sparams,
+                                   SNDRV_PCM_HW_PARAM_CHANNELS,
+                                   runtime->oss.channels, NULL);
+       if (err < 0)
+               goto failure;
 
        format = snd_pcm_oss_format_from(runtime->oss.format);
 
@@ -1956,7 +1969,7 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
        if (runtime->oss.subdivision || runtime->oss.fragshift)
                return -EINVAL;
        fragshift = val & 0xffff;
-       if (fragshift >= 31)
+       if (fragshift >= 25) /* should be large enough */
                return -EINVAL;
        runtime->oss.fragshift = fragshift;
        runtime->oss.maxfrags = (val >> 16) & 0xffff;
index 6f30231..befa980 100644 (file)
@@ -447,6 +447,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
                err = -ENOMEM;
                goto __error;
        }
+       rawmidi_file->user_pversion = 0;
        init_waitqueue_entry(&wait, current);
        add_wait_queue(&rmidi->open_wait, &wait);
        while (1) {
index e1b69c6..e2b7be6 100644 (file)
@@ -397,7 +397,7 @@ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
        }
        if (instr_4op) {
                vp2 = &opl3->voices[voice + 3];
-               if (vp->state > 0) {
+               if (vp2->state > 0) {
                        opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK +
                                               voice_offset + 3);
                        reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;
index c0123bc..b7758db 100644 (file)
@@ -132,8 +132,6 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
                return AE_NOT_FOUND;
        }
 
-       info->handle = handle;
-
        /*
         * On some Intel platforms, multiple children of the HDAS
         * device can be found, but only one of them is the SoundWire
@@ -144,6 +142,9 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
        if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE)
                return AE_OK; /* keep going */
 
+       /* found the correct SoundWire controller */
+       info->handle = handle;
+
        /* device found, stop namespace walk */
        return AE_CTRL_TERMINATE;
 }
@@ -164,8 +165,14 @@ int sdw_intel_acpi_scan(acpi_handle *parent_handle,
        acpi_status status;
 
        info->handle = NULL;
+       /*
+        * In the HDAS ACPI scope, 'SNDW' may be either the child of
+        * 'HDAS' or the grandchild of 'HDAS'. So let's go through
+        * the ACPI from 'HDAS' at max depth of 2 to find the 'SNDW'
+        * device.
+        */
        status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
-                                    parent_handle, 1,
+                                    parent_handle, 2,
                                     sdw_intel_acpi_cb,
                                     NULL, info, NULL);
        if (ACPI_FAILURE(status) || info->handle == NULL)
index 415701b..ffcde74 100644 (file)
@@ -2947,7 +2947,8 @@ static int parse_intel_hdmi(struct hda_codec *codec)
 
 /* Intel Haswell and onwards; audio component with eld notifier */
 static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
-                                const int *port_map, int port_num, int dev_num)
+                                const int *port_map, int port_num, int dev_num,
+                                bool send_silent_stream)
 {
        struct hdmi_spec *spec;
        int err;
@@ -2980,7 +2981,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
         * Enable silent stream feature, if it is enabled via
         * module param or Kconfig option
         */
-       if (enable_silent_stream)
+       if (send_silent_stream)
                spec->send_silent_stream = true;
 
        return parse_intel_hdmi(codec);
@@ -2988,12 +2989,18 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
 
 static int patch_i915_hsw_hdmi(struct hda_codec *codec)
 {
-       return intel_hsw_common_init(codec, 0x08, NULL, 0, 3);
+       return intel_hsw_common_init(codec, 0x08, NULL, 0, 3,
+                                    enable_silent_stream);
 }
 
 static int patch_i915_glk_hdmi(struct hda_codec *codec)
 {
-       return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3);
+       /*
+        * Silent stream calls audio component .get_power() from
+        * .pin_eld_notify(). On GLK this will deadlock in i915 due
+        * to the audio vs. CDCLK workaround.
+        */
+       return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3, false);
 }
 
 static int patch_i915_icl_hdmi(struct hda_codec *codec)
@@ -3004,7 +3011,8 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
         */
        static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
 
-       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3);
+       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3,
+                                    enable_silent_stream);
 }
 
 static int patch_i915_tgl_hdmi(struct hda_codec *codec)
@@ -3016,7 +3024,8 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
        static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
        int ret;
 
-       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4);
+       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4,
+                                   enable_silent_stream);
        if (!ret) {
                struct hdmi_spec *spec = codec->spec;
 
index 9ce7457..28255e7 100644 (file)
@@ -6503,22 +6503,26 @@ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
 /* for alc285_fixup_ideapad_s740_coef() */
 #include "ideapad_s740_helper.c"
 
-static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
-                                                           const struct hda_fixup *fix,
-                                                           int action)
+static const struct coef_fw alc256_fixup_set_coef_defaults_coefs[] = {
+       WRITE_COEF(0x10, 0x0020), WRITE_COEF(0x24, 0x0000),
+       WRITE_COEF(0x26, 0x0000), WRITE_COEF(0x29, 0x3000),
+       WRITE_COEF(0x37, 0xfe05), WRITE_COEF(0x45, 0x5089),
+       {}
+};
+
+static void alc256_fixup_set_coef_defaults(struct hda_codec *codec,
+                                          const struct hda_fixup *fix,
+                                          int action)
 {
        /*
-       * A certain other OS sets these coeffs to different values. On at least one TongFang
-       * barebone these settings might survive even a cold reboot. So to restore a clean slate the
-       * values are explicitly reset to default here. Without this, the external microphone is
-       * always in a plugged-in state, while the internal microphone is always in an unplugged
-       * state, breaking the ability to use the internal microphone.
-       */
-       alc_write_coef_idx(codec, 0x24, 0x0000);
-       alc_write_coef_idx(codec, 0x26, 0x0000);
-       alc_write_coef_idx(codec, 0x29, 0x3000);
-       alc_write_coef_idx(codec, 0x37, 0xfe05);
-       alc_write_coef_idx(codec, 0x45, 0x5089);
+        * A certain other OS sets these coeffs to different values. On at least
+        * one TongFang barebone these settings might survive even a cold
+        * reboot. So to restore a clean slate the values are explicitly reset
+        * to default here. Without this, the external microphone is always in a
+        * plugged-in state, while the internal microphone is always in an
+        * unplugged state, breaking the ability to use the internal microphone.
+        */
+       alc_process_coef_fw(codec, alc256_fixup_set_coef_defaults_coefs);
 }
 
 static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
@@ -6542,6 +6546,23 @@ static void alc233_fixup_no_audio_jack(struct hda_codec *codec,
        alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs);
 }
 
+static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec,
+                                                   const struct hda_fixup *fix,
+                                                   int action)
+{
+       /*
+        * The Clevo NJ51CU comes either with the ALC293 or the ALC256 codec,
+        * but uses the 0x8686 subproduct id in both cases. The ALC256 codec
+        * needs an additional quirk for sound working after suspend and resume.
+        */
+       if (codec->core.vendor_id == 0x10ec0256) {
+               alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+               snd_hda_codec_set_pincfg(codec, 0x19, 0x04a11120);
+       } else {
+               snd_hda_codec_set_pincfg(codec, 0x1a, 0x04a1113c);
+       }
+}
+
 enum {
        ALC269_FIXUP_GPIO2,
        ALC269_FIXUP_SONY_VAIO,
@@ -6759,9 +6780,10 @@ enum {
        ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
        ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
        ALC287_FIXUP_13S_GEN2_SPEAKERS,
-       ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
+       ALC256_FIXUP_SET_COEF_DEFAULTS,
        ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
        ALC233_FIXUP_NO_AUDIO_JACK,
+       ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8465,9 +8487,9 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE,
        },
-       [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+       [ALC256_FIXUP_SET_COEF_DEFAULTS] = {
                .type = HDA_FIXUP_FUNC,
-               .v.func = alc256_fixup_tongfang_reset_persistent_settings,
+               .v.func = alc256_fixup_set_coef_defaults,
        },
        [ALC245_FIXUP_HP_GPIO_LED] = {
                .type = HDA_FIXUP_FUNC,
@@ -8486,6 +8508,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc233_fixup_no_audio_jack,
        },
+       [ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc256_fixup_mic_no_presence_and_resume,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8656,6 +8684,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+       SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
@@ -8701,6 +8730,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -8825,7 +8855,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
        SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
        SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -8929,7 +8959,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
        SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
        SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
-       SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
+       SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
        SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -9119,6 +9149,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
        {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
        {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+       {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
        {}
 };
 #define ALC225_STANDARD_PINS \
@@ -10231,6 +10262,27 @@ static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec,
        }
 }
 
+static void alc897_hp_automute_hook(struct hda_codec *codec,
+                                        struct hda_jack_callback *jack)
+{
+       struct alc_spec *spec = codec->spec;
+       int vref;
+
+       snd_hda_gen_hp_automute(codec, jack);
+       vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
+       snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+                           vref);
+}
+
+static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+                                    const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->gen.hp_automute_hook = alc897_hp_automute_hook;
+       }
+}
+
 static const struct coef_fw alc668_coefs[] = {
        WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03,    0x0),
        WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06,    0x0), WRITE_COEF(0x07, 0x0f80),
@@ -10311,6 +10363,8 @@ enum {
        ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
        ALC668_FIXUP_HEADSET_MIC,
        ALC668_FIXUP_MIC_DET_COEF,
+       ALC897_FIXUP_LENOVO_HEADSET_MIC,
+       ALC897_FIXUP_HEADSET_MIC_PIN,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -10717,6 +10771,19 @@ static const struct hda_fixup alc662_fixups[] = {
                        {}
                },
        },
+       [ALC897_FIXUP_LENOVO_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc897_fixup_lenovo_headset_mic,
+       },
+       [ALC897_FIXUP_HEADSET_MIC_PIN] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x03a11050 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10761,6 +10828,10 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+       SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
index 957eeb6..7e9a9a9 100644 (file)
@@ -146,10 +146,11 @@ static int snd_acp6x_probe(struct pci_dev *pci,
 {
        struct acp6x_dev_data *adata;
        struct platform_device_info pdevinfo[ACP6x_DEVS];
-       int ret, index;
+       int index = 0;
        int val = 0x00;
        u32 addr;
        unsigned int irqflags;
+       int ret;
 
        irqflags = IRQF_SHARED;
        /* Yellow Carp device check */
index 04cb747..b34a854 100644 (file)
@@ -929,6 +929,8 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
        unsigned int val, count;
 
        if (jack_insert) {
+               snd_soc_dapm_mutex_lock(dapm);
+
                snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
                        RT5682_PWR_VREF2 | RT5682_PWR_MB,
                        RT5682_PWR_VREF2 | RT5682_PWR_MB);
@@ -979,6 +981,8 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                snd_soc_component_update_bits(component, RT5682_MICBIAS_2,
                        RT5682_PWR_CLK25M_MASK | RT5682_PWR_CLK1M_MASK,
                        RT5682_PWR_CLK25M_PU | RT5682_PWR_CLK1M_PU);
+
+               snd_soc_dapm_mutex_unlock(dapm);
        } else {
                rt5682_enable_push_button_irq(component, false);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
@@ -2858,6 +2862,8 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
 
        for (i = 0; i < RT5682_DAI_NUM_CLKS; ++i) {
                struct clk_init_data init = { };
+               struct clk_parent_data parent_data;
+               const struct clk_hw *parent;
 
                dai_clk_hw = &rt5682->dai_clks_hw[i];
 
@@ -2865,17 +2871,17 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
                case RT5682_DAI_WCLK_IDX:
                        /* Make MCLK the parent of WCLK */
                        if (rt5682->mclk) {
-                               init.parent_data = &(struct clk_parent_data){
+                               parent_data = (struct clk_parent_data){
                                        .fw_name = "mclk",
                                };
+                               init.parent_data = &parent_data;
                                init.num_parents = 1;
                        }
                        break;
                case RT5682_DAI_BCLK_IDX:
                        /* Make WCLK the parent of BCLK */
-                       init.parent_hws = &(const struct clk_hw *){
-                               &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX]
-                       };
+                       parent = &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX];
+                       init.parent_hws = &parent;
                        init.num_parents = 1;
                        break;
                default:
index 470957f..d49a4f6 100644 (file)
@@ -2693,6 +2693,8 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
 
        for (i = 0; i < RT5682S_DAI_NUM_CLKS; ++i) {
                struct clk_init_data init = { };
+               struct clk_parent_data parent_data;
+               const struct clk_hw *parent;
 
                dai_clk_hw = &rt5682s->dai_clks_hw[i];
 
@@ -2700,17 +2702,17 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
                case RT5682S_DAI_WCLK_IDX:
                        /* Make MCLK the parent of WCLK */
                        if (rt5682s->mclk) {
-                               init.parent_data = &(struct clk_parent_data){
+                               parent_data = (struct clk_parent_data){
                                        .fw_name = "mclk",
                                };
+                               init.parent_data = &parent_data;
                                init.num_parents = 1;
                        }
                        break;
                case RT5682S_DAI_BCLK_IDX:
                        /* Make WCLK the parent of BCLK */
-                       init.parent_hws = &(const struct clk_hw *){
-                               &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX]
-                       };
+                       parent = &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX];
+                       init.parent_hws = &parent;
                        init.num_parents = 1;
                        break;
                default:
index 172e79c..6549e7f 100644 (file)
@@ -291,11 +291,11 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
                ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_44_1KHZ |
                                TAS2770_TDM_CFG_REG0_31_88_2_96KHZ;
                break;
-       case 19200:
+       case 192000:
                ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_48KHZ |
                                TAS2770_TDM_CFG_REG0_31_176_4_192KHZ;
                break;
-       case 17640:
+       case 176400:
                ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_44_1KHZ |
                                TAS2770_TDM_CFG_REG0_31_176_4_192KHZ;
                break;
index 4f568ab..e63c6b7 100644 (file)
@@ -3256,6 +3256,9 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
        int value = ucontrol->value.integer.value[0];
        int sel;
 
+       if (wcd->comp_enabled[comp] == value)
+               return 0;
+
        wcd->comp_enabled[comp] = value;
        sel = value ? WCD934X_HPH_GAIN_SRC_SEL_COMPANDER :
                WCD934X_HPH_GAIN_SRC_SEL_REGISTER;
@@ -3279,10 +3282,10 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
        case COMPANDER_8:
                break;
        default:
-               break;
+               return 0;
        }
 
-       return 0;
+       return 1;
 }
 
 static int wcd934x_rx_hph_mode_get(struct snd_kcontrol *kc,
@@ -3326,6 +3329,31 @@ static int slim_rx_mux_get(struct snd_kcontrol *kc,
        return 0;
 }
 
+static int slim_rx_mux_to_dai_id(int mux)
+{
+       int aif_id;
+
+       switch (mux) {
+       case 1:
+               aif_id = AIF1_PB;
+               break;
+       case 2:
+               aif_id = AIF2_PB;
+               break;
+       case 3:
+               aif_id = AIF3_PB;
+               break;
+       case 4:
+               aif_id = AIF4_PB;
+               break;
+       default:
+               aif_id = -1;
+               break;
+       }
+
+       return aif_id;
+}
+
 static int slim_rx_mux_put(struct snd_kcontrol *kc,
                           struct snd_ctl_elem_value *ucontrol)
 {
@@ -3333,43 +3361,59 @@ static int slim_rx_mux_put(struct snd_kcontrol *kc,
        struct wcd934x_codec *wcd = dev_get_drvdata(w->dapm->dev);
        struct soc_enum *e = (struct soc_enum *)kc->private_value;
        struct snd_soc_dapm_update *update = NULL;
+       struct wcd934x_slim_ch *ch, *c;
        u32 port_id = w->shift;
+       bool found = false;
+       int mux_idx;
+       int prev_mux_idx = wcd->rx_port_value[port_id];
+       int aif_id;
 
-       if (wcd->rx_port_value[port_id] == ucontrol->value.enumerated.item[0])
-               return 0;
+       mux_idx = ucontrol->value.enumerated.item[0];
 
-       wcd->rx_port_value[port_id] = ucontrol->value.enumerated.item[0];
+       if (mux_idx == prev_mux_idx)
+               return 0;
 
-       switch (wcd->rx_port_value[port_id]) {
+       switch(mux_idx) {
        case 0:
-               list_del_init(&wcd->rx_chs[port_id].list);
-               break;
-       case 1:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF1_PB].slim_ch_list);
-               break;
-       case 2:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF2_PB].slim_ch_list);
-               break;
-       case 3:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF3_PB].slim_ch_list);
+               aif_id = slim_rx_mux_to_dai_id(prev_mux_idx);
+               if (aif_id < 0)
+                       return 0;
+
+               list_for_each_entry_safe(ch, c, &wcd->dai[aif_id].slim_ch_list, list) {
+                       if (ch->port == port_id + WCD934X_RX_START) {
+                               found = true;
+                               list_del_init(&ch->list);
+                               break;
+                       }
+               }
+               if (!found)
+                       return 0;
+
                break;
-       case 4:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF4_PB].slim_ch_list);
+       case 1 ... 4:
+               aif_id = slim_rx_mux_to_dai_id(mux_idx);
+               if (aif_id < 0)
+                       return 0;
+
+               if (list_empty(&wcd->rx_chs[port_id].list)) {
+                       list_add_tail(&wcd->rx_chs[port_id].list,
+                                     &wcd->dai[aif_id].slim_ch_list);
+               } else {
+                       dev_err(wcd->dev ,"SLIM_RX%d PORT is busy\n", port_id);
+                       return 0;
+               }
                break;
+
        default:
-               dev_err(wcd->dev, "Unknown AIF %d\n",
-                       wcd->rx_port_value[port_id]);
+               dev_err(wcd->dev, "Unknown AIF %d\n", mux_idx);
                goto err;
        }
 
+       wcd->rx_port_value[port_id] = mux_idx;
        snd_soc_dapm_mux_update_power(w->dapm, kc, wcd->rx_port_value[port_id],
                                      e, update);
 
-       return 0;
+       return 1;
 err:
        return -EINVAL;
 }
@@ -3815,6 +3859,7 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
        struct soc_mixer_control *mixer =
                        (struct soc_mixer_control *)kc->private_value;
        int enable = ucontrol->value.integer.value[0];
+       struct wcd934x_slim_ch *ch, *c;
        int dai_id = widget->shift;
        int port_id = mixer->shift;
 
@@ -3822,17 +3867,32 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
        if (enable == wcd->tx_port_value[port_id])
                return 0;
 
-       wcd->tx_port_value[port_id] = enable;
-
-       if (enable)
-               list_add_tail(&wcd->tx_chs[port_id].list,
-                             &wcd->dai[dai_id].slim_ch_list);
-       else
-               list_del_init(&wcd->tx_chs[port_id].list);
+       if (enable) {
+               if (list_empty(&wcd->tx_chs[port_id].list)) {
+                       list_add_tail(&wcd->tx_chs[port_id].list,
+                                     &wcd->dai[dai_id].slim_ch_list);
+               } else {
+                       dev_err(wcd->dev ,"SLIM_TX%d PORT is busy\n", port_id);
+                       return 0;
+               }
+        } else {
+               bool found = false;
+
+               list_for_each_entry_safe(ch, c, &wcd->dai[dai_id].slim_ch_list, list) {
+                       if (ch->port == port_id) {
+                               found = true;
+                               list_del_init(&wcd->tx_chs[port_id].list);
+                               break;
+                       }
+               }
+               if (!found)
+                       return 0;
+        }
 
+       wcd->tx_port_value[port_id] = enable;
        snd_soc_dapm_mixer_update_power(widget->dapm, kc, enable, update);
 
-       return 0;
+       return 1;
 }
 
 static const struct snd_kcontrol_new aif1_slim_cap_mixer[] = {
index 2da4a5f..564b78f 100644 (file)
@@ -772,7 +772,8 @@ static int wsa881x_put_pa_gain(struct snd_kcontrol *kc,
 
                usleep_range(1000, 1010);
        }
-       return 0;
+
+       return 1;
 }
 
 static int wsa881x_get_port(struct snd_kcontrol *kcontrol,
@@ -816,15 +817,22 @@ static int wsa881x_set_port(struct snd_kcontrol *kcontrol,
                (struct soc_mixer_control *)kcontrol->private_value;
        int portidx = mixer->reg;
 
-       if (ucontrol->value.integer.value[0])
+       if (ucontrol->value.integer.value[0]) {
+               if (data->port_enable[portidx])
+                       return 0;
+
                data->port_enable[portidx] = true;
-       else
+       } else {
+               if (!data->port_enable[portidx])
+                       return 0;
+
                data->port_enable[portidx] = false;
+       }
 
        if (portidx == WSA881X_PORT_BOOST) /* Boost Switch */
                wsa881x_boost_ctrl(comp, data->port_enable[portidx]);
 
-       return 0;
+       return 1;
 }
 
 static const char * const smart_boost_lvl_text[] = {
index 9322245..67729de 100644 (file)
@@ -18,7 +18,6 @@
 #define AIU_RST_SOFT_I2S_FAST          BIT(0)
 
 #define AIU_I2S_DAC_CFG_MSB_FIRST      BIT(2)
-#define AIU_I2S_MISC_HOLD_EN           BIT(2)
 #define AIU_CLK_CTRL_I2S_DIV_EN                BIT(0)
 #define AIU_CLK_CTRL_I2S_DIV           GENMASK(3, 2)
 #define AIU_CLK_CTRL_AOCLK_INVERT      BIT(6)
@@ -36,37 +35,6 @@ static void aiu_encoder_i2s_divider_enable(struct snd_soc_component *component,
                                      enable ? AIU_CLK_CTRL_I2S_DIV_EN : 0);
 }
 
-static void aiu_encoder_i2s_hold(struct snd_soc_component *component,
-                                bool enable)
-{
-       snd_soc_component_update_bits(component, AIU_I2S_MISC,
-                                     AIU_I2S_MISC_HOLD_EN,
-                                     enable ? AIU_I2S_MISC_HOLD_EN : 0);
-}
-
-static int aiu_encoder_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
-                                  struct snd_soc_dai *dai)
-{
-       struct snd_soc_component *component = dai->component;
-
-       switch (cmd) {
-       case SNDRV_PCM_TRIGGER_START:
-       case SNDRV_PCM_TRIGGER_RESUME:
-       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               aiu_encoder_i2s_hold(component, false);
-               return 0;
-
-       case SNDRV_PCM_TRIGGER_STOP:
-       case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               aiu_encoder_i2s_hold(component, true);
-               return 0;
-
-       default:
-               return -EINVAL;
-       }
-}
-
 static int aiu_encoder_i2s_setup_desc(struct snd_soc_component *component,
                                      struct snd_pcm_hw_params *params)
 {
@@ -353,7 +321,6 @@ static void aiu_encoder_i2s_shutdown(struct snd_pcm_substream *substream,
 }
 
 const struct snd_soc_dai_ops aiu_encoder_i2s_dai_ops = {
-       .trigger        = aiu_encoder_i2s_trigger,
        .hw_params      = aiu_encoder_i2s_hw_params,
        .hw_free        = aiu_encoder_i2s_hw_free,
        .set_fmt        = aiu_encoder_i2s_set_fmt,
index 2388a2d..57e6e71 100644 (file)
@@ -20,6 +20,8 @@
 #define AIU_MEM_I2S_CONTROL_MODE_16BIT BIT(6)
 #define AIU_MEM_I2S_BUF_CNTL_INIT      BIT(0)
 #define AIU_RST_SOFT_I2S_FAST          BIT(0)
+#define AIU_I2S_MISC_HOLD_EN           BIT(2)
+#define AIU_I2S_MISC_FORCE_LEFT_RIGHT  BIT(4)
 
 #define AIU_FIFO_I2S_BLOCK             256
 
@@ -90,6 +92,10 @@ static int aiu_fifo_i2s_hw_params(struct snd_pcm_substream *substream,
        unsigned int val;
        int ret;
 
+       snd_soc_component_update_bits(component, AIU_I2S_MISC,
+                                     AIU_I2S_MISC_HOLD_EN,
+                                     AIU_I2S_MISC_HOLD_EN);
+
        ret = aiu_fifo_hw_params(substream, params, dai);
        if (ret)
                return ret;
@@ -117,6 +123,19 @@ static int aiu_fifo_i2s_hw_params(struct snd_pcm_substream *substream,
        snd_soc_component_update_bits(component, AIU_MEM_I2S_MASKS,
                                      AIU_MEM_I2S_MASKS_IRQ_BLOCK, val);
 
+       /*
+        * Most (all?) supported SoCs have this bit set by default. The vendor
+        * driver however sets it manually (depending on the version either
+        * while un-setting AIU_I2S_MISC_HOLD_EN or right before that). Follow
+        * the same approach for consistency with the vendor driver.
+        */
+       snd_soc_component_update_bits(component, AIU_I2S_MISC,
+                                     AIU_I2S_MISC_FORCE_LEFT_RIGHT,
+                                     AIU_I2S_MISC_FORCE_LEFT_RIGHT);
+
+       snd_soc_component_update_bits(component, AIU_I2S_MISC,
+                                     AIU_I2S_MISC_HOLD_EN, 0);
+
        return 0;
 }
 
index 4ad2326..d67ff4c 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/bitfield.h>
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
 #include <sound/soc-dai.h>
@@ -179,6 +180,11 @@ int aiu_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd,
        struct snd_card *card = rtd->card->snd_card;
        struct aiu_fifo *fifo = dai->playback_dma_data;
        size_t size = fifo->pcm->buffer_bytes_max;
+       int ret;
+
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
                                       card->dev, size, size);
index cd74681..928fd23 100644 (file)
@@ -498,14 +498,16 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
        struct session_data *session = &data->sessions[session_id];
 
        if (ucontrol->value.integer.value[0]) {
+               if (session->port_id == be_id)
+                       return 0;
+
                session->port_id = be_id;
                snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
        } else {
-               if (session->port_id == be_id) {
-                       session->port_id = -1;
+               if (session->port_id == -1 || session->port_id != be_id)
                        return 0;
-               }
 
+               session->port_id = -1;
                snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
        }
 
index 17b9b28..5f9cb5c 100644 (file)
@@ -95,6 +95,7 @@ struct rk_i2s_tdm_dev {
        spinlock_t lock; /* xfer lock */
        bool has_playback;
        bool has_capture;
+       struct snd_soc_dai_driver *dai;
 };
 
 static int to_ch_num(unsigned int val)
@@ -1310,19 +1311,14 @@ static const struct of_device_id rockchip_i2s_tdm_match[] = {
        {},
 };
 
-static struct snd_soc_dai_driver i2s_tdm_dai = {
+static const struct snd_soc_dai_driver i2s_tdm_dai = {
        .probe = rockchip_i2s_tdm_dai_probe,
-       .playback = {
-               .stream_name  = "Playback",
-       },
-       .capture = {
-               .stream_name  = "Capture",
-       },
        .ops = &rockchip_i2s_tdm_dai_ops,
 };
 
-static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
+static int rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
 {
+       struct snd_soc_dai_driver *dai;
        struct property *dma_names;
        const char *dma_name;
        u64 formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |
@@ -1337,19 +1333,33 @@ static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
                        i2s_tdm->has_capture = true;
        }
 
+       dai = devm_kmemdup(i2s_tdm->dev, &i2s_tdm_dai,
+                          sizeof(*dai), GFP_KERNEL);
+       if (!dai)
+               return -ENOMEM;
+
        if (i2s_tdm->has_playback) {
-               i2s_tdm_dai.playback.channels_min = 2;
-               i2s_tdm_dai.playback.channels_max = 8;
-               i2s_tdm_dai.playback.rates = SNDRV_PCM_RATE_8000_192000;
-               i2s_tdm_dai.playback.formats = formats;
+               dai->playback.stream_name  = "Playback";
+               dai->playback.channels_min = 2;
+               dai->playback.channels_max = 8;
+               dai->playback.rates = SNDRV_PCM_RATE_8000_192000;
+               dai->playback.formats = formats;
        }
 
        if (i2s_tdm->has_capture) {
-               i2s_tdm_dai.capture.channels_min = 2;
-               i2s_tdm_dai.capture.channels_max = 8;
-               i2s_tdm_dai.capture.rates = SNDRV_PCM_RATE_8000_192000;
-               i2s_tdm_dai.capture.formats = formats;
+               dai->capture.stream_name  = "Capture";
+               dai->capture.channels_min = 2;
+               dai->capture.channels_max = 8;
+               dai->capture.rates = SNDRV_PCM_RATE_8000_192000;
+               dai->capture.formats = formats;
        }
+
+       if (i2s_tdm->clk_trcm != TRCM_TXRX)
+               dai->symmetric_rate = 1;
+
+       i2s_tdm->dai = dai;
+
+       return 0;
 }
 
 static int rockchip_i2s_tdm_path_check(struct rk_i2s_tdm_dev *i2s_tdm,
@@ -1541,8 +1551,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
        spin_lock_init(&i2s_tdm->lock);
        i2s_tdm->soc_data = (struct rk_i2s_soc_data *)of_id->data;
 
-       rockchip_i2s_tdm_init_dai(i2s_tdm);
-
        i2s_tdm->frame_width = 64;
 
        i2s_tdm->clk_trcm = TRCM_TXRX;
@@ -1555,8 +1563,10 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
                }
                i2s_tdm->clk_trcm = TRCM_RX;
        }
-       if (i2s_tdm->clk_trcm != TRCM_TXRX)
-               i2s_tdm_dai.symmetric_rate = 1;
+
+       ret = rockchip_i2s_tdm_init_dai(i2s_tdm);
+       if (ret)
+               return ret;
 
        i2s_tdm->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf");
        if (IS_ERR(i2s_tdm->grf))
@@ -1678,7 +1688,7 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
 
        ret = devm_snd_soc_register_component(&pdev->dev,
                                              &rockchip_i2s_tdm_component,
-                                             &i2s_tdm_dai, 1);
+                                             i2s_tdm->dai, 1);
 
        if (ret) {
                dev_err(&pdev->dev, "Could not register DAI\n");
index 6744318..13cd96e 100644 (file)
@@ -22,6 +22,7 @@
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
 #define IDISP_VID_INTEL        0x80860000
+#define CODEC_PROBE_RETRIES 3
 
 /* load the legacy HDA codec driver */
 static int request_codec_module(struct hda_codec *codec)
@@ -121,12 +122,15 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
        u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) |
                (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
        u32 resp = -1;
-       int ret;
+       int ret, retry = 0;
+
+       do {
+               mutex_lock(&hbus->core.cmd_mutex);
+               snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
+               snd_hdac_bus_get_response(&hbus->core, address, &resp);
+               mutex_unlock(&hbus->core.cmd_mutex);
+       } while (resp == -1 && retry++ < CODEC_PROBE_RETRIES);
 
-       mutex_lock(&hbus->core.cmd_mutex);
-       snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
-       snd_hdac_bus_get_response(&hbus->core, address, &resp);
-       mutex_unlock(&hbus->core.cmd_mutex);
        if (resp == -1)
                return -EIO;
        dev_dbg(sdev->dev, "HDA codec #%d probed OK: response: %x\n",
index f2ea34d..fd46210 100644 (file)
@@ -112,8 +112,12 @@ static const struct pci_device_id sof_pci_ids[] = {
                .driver_data = (unsigned long)&adls_desc},
        { PCI_DEVICE(0x8086, 0x51c8), /* ADL-P */
                .driver_data = (unsigned long)&adl_desc},
+       { PCI_DEVICE(0x8086, 0x51cd), /* ADL-P */
+               .driver_data = (unsigned long)&adl_desc},
        { PCI_DEVICE(0x8086, 0x51cc), /* ADL-M */
                .driver_data = (unsigned long)&adl_desc},
+       { PCI_DEVICE(0x8086, 0x54c8), /* ADL-N */
+               .driver_data = (unsigned long)&adl_desc},
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, sof_pci_ids);
index 933c450..3785cad 100644 (file)
@@ -514,8 +514,8 @@ static int tegra210_adx_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_adx_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_adx_runtime_suspend,
                           tegra210_adx_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_adx_driver = {
index 6895763..d064cc6 100644 (file)
@@ -583,8 +583,8 @@ static int tegra210_amx_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_amx_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_amx_runtime_suspend,
                           tegra210_amx_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_amx_driver = {
index 51d3755..16e679a 100644 (file)
@@ -666,8 +666,8 @@ static int tegra210_mixer_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_mixer_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_mixer_runtime_suspend,
                           tegra210_mixer_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_mixer_driver = {
index 85b1558..acf5932 100644 (file)
@@ -164,7 +164,7 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
        if (err < 0)
                goto end;
 
-       return 1;
+       err = 1;
 
 end:
        pm_runtime_put(cmpnt->dev);
@@ -236,7 +236,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
                           TEGRA210_MVC_VOLUME_SWITCH_MASK,
                           TEGRA210_MVC_VOLUME_SWITCH_TRIGGER);
 
-       return 1;
+       err = 1;
 
 end:
        pm_runtime_put(cmpnt->dev);
@@ -639,8 +639,8 @@ static int tegra210_mvc_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_mvc_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_mvc_runtime_suspend,
                           tegra210_mvc_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_mvc_driver = {
index 7a2227e..368f077 100644 (file)
@@ -3594,8 +3594,8 @@ static int tegra210_sfc_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_sfc_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_sfc_runtime_suspend,
                           tegra210_sfc_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_sfc_driver = {
index b95438c..a734048 100644 (file)
@@ -116,16 +116,24 @@ static const struct snd_kcontrol_new tegra_machine_controls[] = {
        SOC_DAPM_PIN_SWITCH("Headset Mic"),
        SOC_DAPM_PIN_SWITCH("Internal Mic 1"),
        SOC_DAPM_PIN_SWITCH("Internal Mic 2"),
+       SOC_DAPM_PIN_SWITCH("Headphones"),
+       SOC_DAPM_PIN_SWITCH("Mic Jack"),
 };
 
 int tegra_asoc_machine_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_card *card = rtd->card;
        struct tegra_machine *machine = snd_soc_card_get_drvdata(card);
+       const char *jack_name;
        int err;
 
        if (machine->gpiod_hp_det && machine->asoc->add_hp_jack) {
-               err = snd_soc_card_jack_new(card, "Headphones Jack",
+               if (machine->asoc->hp_jack_name)
+                       jack_name = machine->asoc->hp_jack_name;
+               else
+                       jack_name = "Headphones Jack";
+
+               err = snd_soc_card_jack_new(card, jack_name,
                                            SND_JACK_HEADPHONE,
                                            &tegra_machine_hp_jack,
                                            tegra_machine_hp_jack_pins,
@@ -658,6 +666,7 @@ static struct snd_soc_card snd_soc_tegra_max98090 = {
 static const struct tegra_asoc_data tegra_max98090_data = {
        .mclk_rate = tegra_machine_mclk_rate_12mhz,
        .card = &snd_soc_tegra_max98090,
+       .hp_jack_name = "Headphones",
        .add_common_dapm_widgets = true,
        .add_common_controls = true,
        .add_common_snd_ops = true,
index d6a8d13..6f795d7 100644 (file)
@@ -14,6 +14,7 @@ struct snd_soc_pcm_runtime;
 struct tegra_asoc_data {
        unsigned int (*mclk_rate)(unsigned int srate);
        const char *codec_dev_name;
+       const char *hp_jack_name;
        struct snd_soc_card *card;
        unsigned int mclk_id;
        bool hp_jack_gpio_active_low;
index d489c1d..823b6b8 100644 (file)
@@ -3016,11 +3016,11 @@ static const struct snd_djm_ctl snd_djm_ctls_750mk2[] = {
 
 
 static const struct snd_djm_device snd_djm_devices[] = {
-       SND_DJM_DEVICE(250mk2),
-       SND_DJM_DEVICE(750),
-       SND_DJM_DEVICE(750mk2),
-       SND_DJM_DEVICE(850),
-       SND_DJM_DEVICE(900nxs2)
+       [SND_DJM_250MK2_IDX] = SND_DJM_DEVICE(250mk2),
+       [SND_DJM_750_IDX] = SND_DJM_DEVICE(750),
+       [SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
+       [SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
+       [SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
 };
 
 
index a59cb0e..73409e2 100644 (file)
@@ -83,6 +83,7 @@ struct btf_id {
                int      cnt;
        };
        int              addr_cnt;
+       bool             is_set;
        Elf64_Addr       addr[ADDR_CNT];
 };
 
@@ -451,8 +452,10 @@ static int symbols_collect(struct object *obj)
                         * in symbol's size, together with 'cnt' field hence
                         * that - 1.
                         */
-                       if (id)
+                       if (id) {
                                id->cnt = sym.st_size / sizeof(int) - 1;
+                               id->is_set = true;
+                       }
                } else {
                        pr_err("FAILED unsupported prefix %s\n", prefix);
                        return -1;
@@ -568,9 +571,8 @@ static int id_patch(struct object *obj, struct btf_id *id)
        int *ptr = data->d_buf;
        int i;
 
-       if (!id->id) {
+       if (!id->id && !id->is_set)
                pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
-       }
 
        for (i = 0; i < id->addr_cnt; i++) {
                unsigned long addr = id->addr[i];
index 45a9a59..ae61f46 100644 (file)
@@ -48,7 +48,6 @@ FEATURE_TESTS_BASIC :=                  \
         numa_num_possible_cpus          \
         libperl                         \
         libpython                       \
-        libpython-version               \
         libslang                        \
         libslang-include-subdir         \
         libtraceevent                   \
index 0a3244a..1480910 100644 (file)
@@ -32,7 +32,6 @@ FILES=                                          \
          test-numa_num_possible_cpus.bin        \
          test-libperl.bin                       \
          test-libpython.bin                     \
-         test-libpython-version.bin             \
          test-libslang.bin                      \
          test-libslang-include-subdir.bin       \
          test-libtraceevent.bin                 \
@@ -227,9 +226,6 @@ $(OUTPUT)test-libperl.bin:
 $(OUTPUT)test-libpython.bin:
        $(BUILD) $(FLAGS_PYTHON_EMBED)
 
-$(OUTPUT)test-libpython-version.bin:
-       $(BUILD)
-
 $(OUTPUT)test-libbfd.bin:
        $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
 
index 0b243ce..5ffafb9 100644 (file)
 # include "test-libpython.c"
 #undef main
 
-#define main main_test_libpython_version
-# include "test-libpython-version.c"
-#undef main
-
 #define main main_test_libperl
 # include "test-libperl.c"
 #undef main
 int main(int argc, char *argv[])
 {
        main_test_libpython();
-       main_test_libpython_version();
        main_test_libperl();
        main_test_hello();
        main_test_libelf();
diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c
deleted file mode 100644 (file)
index 47714b9..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <Python.h>
-
-#if PY_VERSION_HEX >= 0x03000000
-       #error
-#endif
-
-int main(void)
-{
-       return 0;
-}
diff --git a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h
deleted file mode 100644 (file)
index 72d595c..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_
-#define _LIBLOCKDEP_DEBUG_LOCKS_H_
-
-#include <stddef.h>
-#include <linux/compiler.h>
-#include <asm/bug.h>
-
-#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x)
-
-extern bool debug_locks;
-extern bool debug_locks_silent;
-
-#endif
diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h
deleted file mode 100644 (file)
index b25580b..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_
-#define _LIBLOCKDEP_LINUX_HARDIRQ_H_
-
-#define SOFTIRQ_BITS   0UL
-#define HARDIRQ_BITS   0UL
-#define SOFTIRQ_SHIFT  0UL
-#define HARDIRQ_SHIFT  0UL
-#define hardirq_count()        0UL
-#define softirq_count()        0UL
-
-#endif
diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h
deleted file mode 100644 (file)
index 501262a..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-
-# define lockdep_hardirq_context()     0
-# define lockdep_softirq_context(p)    0
-# define lockdep_hardirqs_enabled()    0
-# define lockdep_softirqs_enabled(p)   0
-# define lockdep_hardirq_enter()       do { } while (0)
-# define lockdep_hardirq_exit()                do { } while (0)
-# define lockdep_softirq_enter()       do { } while (0)
-# define lockdep_softirq_exit()                do { } while (0)
-# define INIT_TRACE_IRQFLAGS
-
-# define stop_critical_timings() do { } while (0)
-# define start_critical_timings() do { } while (0)
-
-#define raw_local_irq_disable() do { } while (0)
-#define raw_local_irq_enable() do { } while (0)
-#define raw_local_irq_save(flags) ((flags) = 0)
-#define raw_local_irq_restore(flags) ((void)(flags))
-#define raw_local_save_flags(flags) ((flags) = 0)
-#define raw_irqs_disabled_flags(flags) ((void)(flags))
-#define raw_irqs_disabled() 0
-#define raw_safe_halt()
-
-#define local_irq_enable() do { } while (0)
-#define local_irq_disable() do { } while (0)
-#define local_irq_save(flags) ((flags) = 0)
-#define local_irq_restore(flags) ((void)(flags))
-#define local_save_flags(flags)        ((flags) = 0)
-#define irqs_disabled() (1)
-#define irqs_disabled_flags(flags) ((void)(flags), 0)
-#define safe_halt() do { } while (0)
-
-#define trace_lock_release(x, y)
-#define trace_lock_acquire(a, b, c, d, e, f, g)
-
-#endif
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
deleted file mode 100644 (file)
index e569972..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LOCKDEP_H_
-#define _LIBLOCKDEP_LOCKDEP_H_
-
-#include <sys/prctl.h>
-#include <sys/syscall.h>
-#include <string.h>
-#include <limits.h>
-#include <linux/utsname.h>
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/kern_levels.h>
-#include <linux/err.h>
-#include <linux/rcu.h>
-#include <linux/list.h>
-#include <linux/hardirq.h>
-#include <unistd.h>
-
-#define MAX_LOCK_DEPTH 63UL
-
-#define asmlinkage
-#define __visible
-
-#include "../../../include/linux/lockdep.h"
-
-struct task_struct {
-       u64 curr_chain_key;
-       int lockdep_depth;
-       unsigned int lockdep_recursion;
-       struct held_lock held_locks[MAX_LOCK_DEPTH];
-       gfp_t lockdep_reclaim_gfp;
-       int pid;
-       int state;
-       char comm[17];
-};
-
-#define TASK_RUNNING 0
-
-extern struct task_struct *__curr(void);
-
-#define current (__curr())
-
-static inline int debug_locks_off(void)
-{
-       return 1;
-}
-
-#define task_pid_nr(tsk) ((tsk)->pid)
-
-#define KSYM_NAME_LEN 128
-#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
-#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define pr_warn pr_err
-#define pr_cont pr_err
-
-#define list_del_rcu list_del
-
-#define atomic_t unsigned long
-#define atomic_inc(x) ((*(x))++)
-
-#define print_tainted() ""
-#define static_obj(x) 1
-
-#define debug_show_all_locks()
-extern void debug_check_no_locks_held(void);
-
-static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr)
-{
-       return false;
-}
-
-#endif
diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h
deleted file mode 100644 (file)
index 8b3b03b..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H
-#define _TOOLS_INCLUDE_LINUX_PROC_FS_H
-
-#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */
index c934572..622266b 100644 (file)
@@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
        return true;
 }
 
-#include <linux/lockdep.h>
-
 #endif
diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h
deleted file mode 100644 (file)
index ae343ac..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_
-#define _LIBLOCKDEP_LINUX_STACKTRACE_H_
-
-#include <execinfo.h>
-
-struct stack_trace {
-       unsigned int nr_entries, max_entries;
-       unsigned long *entries;
-       int skip;
-};
-
-static inline void print_stack_trace(struct stack_trace *trace, int spaces)
-{
-       backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);
-}
-
-#define save_stack_trace(trace)        \
-       ((trace)->nr_entries =  \
-               backtrace((void **)(trace)->entries, (trace)->max_entries))
-
-static inline int dump_stack(void)
-{
-       void *array[64];
-       size_t size;
-
-       size = backtrace(array, 64);
-       backtrace_symbols_fd(array, size, 1);
-
-       return 0;
-}
-
-#endif
index afd1447..3df74cf 100644 (file)
@@ -271,8 +271,6 @@ endif
 
 FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
 FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
-FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
-FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
 
 FEATURE_CHECK_LDFLAGS-libaio = -lrt
 
index 7bef917..15109af 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index df5261e..ed9c5c2 100644 (file)
 446  common    landlock_restrict_self  sys_landlock_restrict_self      sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448  common    process_mrelease        sys_process_mrelease            sys_process_mrelease
+449  common    futex_waitv             sys_futex_waitv                 sys_futex_waitv
index fa0ff4c..488f6e6 100644 (file)
@@ -223,8 +223,6 @@ static unsigned int group(pthread_t *pth,
                snd_ctx->out_fds[i] = fds[1];
                if (!thread_mode)
                        close(fds[0]);
-
-               free(ctx);
        }
 
        /* Now we have all the fds, fork the senders */
@@ -241,8 +239,6 @@ static unsigned int group(pthread_t *pth,
                for (i = 0; i < num_fds; i++)
                        close(snd_ctx->out_fds[i]);
 
-       free(snd_ctx);
-
        /* Return number of children to reap */
        return num_fds * 2;
 }
index bc5259d..409b721 100644 (file)
@@ -755,12 +755,16 @@ static int parse_vm_time_correlation(const struct option *opt, const char *str,
        return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
 }
 
+static int output_fd(struct perf_inject *inject)
+{
+       return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
+}
+
 static int __cmd_inject(struct perf_inject *inject)
 {
        int ret = -EINVAL;
        struct perf_session *session = inject->session;
-       struct perf_data *data_out = &inject->output;
-       int fd = inject->in_place_update ? -1 : perf_data__fd(data_out);
+       int fd = output_fd(inject);
        u64 output_data_offset;
 
        signal(SIGINT, sig_handler);
@@ -820,7 +824,7 @@ static int __cmd_inject(struct perf_inject *inject)
                inject->tool.ordered_events = true;
                inject->tool.ordering_requires_timestamps = true;
                /* Allow space in the header for new attributes */
-               output_data_offset = 4096;
+               output_data_offset = roundup(8192 + session->header.data_offset, 4096);
                if (inject->strip)
                        strip_init(inject);
        }
@@ -1015,7 +1019,7 @@ int cmd_inject(int argc, const char **argv)
        }
 
        inject.session = __perf_session__new(&data, repipe,
-                                            perf_data__fd(&inject.output),
+                                            output_fd(&inject),
                                             &inject.tool);
        if (IS_ERR(inject.session)) {
                ret = PTR_ERR(inject.session);
@@ -1078,7 +1082,8 @@ out_delete:
        zstd_fini(&(inject.session->zstd_data));
        perf_session__delete(inject.session);
 out_close_output:
-       perf_data__close(&inject.output);
+       if (!inject.in_place_update)
+               perf_data__close(&inject.output);
        free(inject.itrace_synth_opts.vm_tm_corr_args);
        return ret;
 }
index 9434367..c82b033 100644 (file)
@@ -2473,7 +2473,7 @@ static int process_switch_event(struct perf_tool *tool,
        if (perf_event__process_switch(tool, event, sample, machine) < 0)
                return -1;
 
-       if (scripting_ops && scripting_ops->process_switch)
+       if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
                scripting_ops->process_switch(event, sample, machine);
 
        if (!script->show_switch_events)
index 1d3a189..66452a8 100644 (file)
@@ -32,8 +32,7 @@ try:
 except:
        broken_pipe_exception = IOError
 
-glb_switch_str         = None
-glb_switch_printed     = True
+glb_switch_str         = {}
 glb_insn               = False
 glb_disassembler       = None
 glb_src                        = False
@@ -70,6 +69,7 @@ def trace_begin():
        ap = argparse.ArgumentParser(usage = "", add_help = False)
        ap.add_argument("--insn-trace", action='store_true')
        ap.add_argument("--src-trace", action='store_true')
+       ap.add_argument("--all-switch-events", action='store_true')
        global glb_args
        global glb_insn
        global glb_src
@@ -256,10 +256,6 @@ def print_srccode(comm, param_dict, sample, symbol, dso, with_insn):
        print(start_str, src_str)
 
 def do_process_event(param_dict):
-       global glb_switch_printed
-       if not glb_switch_printed:
-               print(glb_switch_str)
-               glb_switch_printed = True
        event_attr = param_dict["attr"]
        sample     = param_dict["sample"]
        raw_buf    = param_dict["raw_buf"]
@@ -274,6 +270,11 @@ def do_process_event(param_dict):
        dso    = get_optional(param_dict, "dso")
        symbol = get_optional(param_dict, "symbol")
 
+       cpu = sample["cpu"]
+       if cpu in glb_switch_str:
+               print(glb_switch_str[cpu])
+               del glb_switch_str[cpu]
+
        if name[0:12] == "instructions":
                if glb_src:
                        print_srccode(comm, param_dict, sample, symbol, dso, True)
@@ -336,8 +337,6 @@ def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
                sys.exit(1)
 
 def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
-       global glb_switch_printed
-       global glb_switch_str
        if out:
                out_str = "Switch out "
        else:
@@ -350,6 +349,10 @@ def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_pree
                machine_str = ""
        else:
                machine_str = "machine PID %d" % machine_pid
-       glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
+       switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
                (out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str)
-       glb_switch_printed = False
+       if glb_args.all_switch_events:
+               print(switch_str);
+       else:
+               global glb_switch_str
+               glb_switch_str[cpu] = switch_str
index c895de4..d54c537 100644 (file)
@@ -169,7 +169,9 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
        TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0);
        TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies);
        TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0);
-       TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
+
+       if (num_dies) // Some platforms do not have CPU die support, for example s390
+               TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
 
        /*
         * Source count returns the number of events aggregating in a leader
index 574b7e4..07b6f4e 100644 (file)
@@ -109,6 +109,7 @@ static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist,
        struct evsel *evsel;
        u64 count;
 
+       perf_stat__reset_shadow_stats();
        evlist__for_each_entry(evlist, evsel) {
                count = find_value(evsel->name, vals);
                perf_stat__update_shadow_stats(evsel, count, 0, st);
index e9bfe85..b1be59b 100644 (file)
@@ -170,9 +170,11 @@ void ui__exit(bool wait_for_ok)
                                    "Press any key...", 0);
 
        SLtt_set_cursor_visibility(1);
-       SLsmg_refresh();
-       SLsmg_reset_smg();
+       if (!pthread_mutex_trylock(&ui__lock)) {
+               SLsmg_refresh();
+               SLsmg_reset_smg();
+               pthread_mutex_unlock(&ui__lock);
+       }
        SLang_reset_tty();
-
        perf_error__unregister(&perf_tui_eops);
 }
diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h
deleted file mode 100644 (file)
index 186a555..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-// Copyright (c) 2021 Facebook
-
-#ifndef __BPERF_STAT_H
-#define __BPERF_STAT_H
-
-typedef struct {
-       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-       __uint(key_size, sizeof(__u32));
-       __uint(value_size, sizeof(struct bpf_perf_event_value));
-       __uint(max_entries, 1);
-} reading_map;
-
-#endif /* __BPERF_STAT_H */
index b8fa3cb..f193998 100644 (file)
@@ -1,14 +1,23 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 // Copyright (c) 2021 Facebook
-#include <linux/bpf.h>
-#include <linux/perf_event.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
-#include "bperf.h"
 #include "bperf_u.h"
 
-reading_map diff_readings SEC(".maps");
-reading_map accum_readings SEC(".maps");
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} diff_readings SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} accum_readings SEC(".maps");
 
 struct {
        __uint(type, BPF_MAP_TYPE_HASH);
index 4f70d14..e2a2d4c 100644 (file)
@@ -1,10 +1,8 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 // Copyright (c) 2021 Facebook
-#include <linux/bpf.h>
-#include <linux/perf_event.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
-#include "bperf.h"
 
 struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -13,8 +11,19 @@ struct {
        __uint(map_flags, BPF_F_PRESERVE_ELEMS);
 } events SEC(".maps");
 
-reading_map prev_readings SEC(".maps");
-reading_map diff_readings SEC(".maps");
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} prev_readings SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} diff_readings SEC(".maps");
 
 SEC("raw_tp/sched_switch")
 int BPF_PROG(on_switch)
index ab12b4c..97037d3 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 // Copyright (c) 2020 Facebook
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
index 95ffed6..c59331e 100644 (file)
@@ -44,13 +44,16 @@ struct perf_event_attr;
 /* perf sample has 16 bits size limit */
 #define PERF_SAMPLE_MAX_SIZE (1 << 16)
 
+/* number of register is bound by the number of bits in regs_dump::mask (64) */
+#define PERF_SAMPLE_REGS_CACHE_SIZE (8 * sizeof(u64))
+
 struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;
 
        /* Cached values/mask filled by first register access. */
-       u64 cache_regs[PERF_REGS_MAX];
+       u64 cache_regs[PERF_SAMPLE_REGS_CACHE_SIZE];
        u64 cache_mask;
 };
 
index 1d532b9..666b59b 100644 (file)
@@ -12,6 +12,7 @@
 #include "expr-bison.h"
 #include "expr-flex.h"
 #include "smt.h"
+#include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/zalloc.h>
 #include <ctype.h>
@@ -65,7 +66,12 @@ static bool key_equal(const void *key1, const void *key2,
 
 struct hashmap *ids__new(void)
 {
-       return hashmap__new(key_hash, key_equal, NULL);
+       struct hashmap *hash;
+
+       hash = hashmap__new(key_hash, key_equal, NULL);
+       if (IS_ERR(hash))
+               return NULL;
+       return hash;
 }
 
 void ids__free(struct hashmap *ids)
@@ -299,6 +305,10 @@ struct expr_parse_ctx *expr__ctx_new(void)
                return NULL;
 
        ctx->ids = hashmap__new(key_hash, key_equal, NULL);
+       if (IS_ERR(ctx->ids)) {
+               free(ctx);
+               return NULL;
+       }
        ctx->runtime = 0;
 
        return ctx;
index 79cce21..e3c1a53 100644 (file)
@@ -2321,6 +2321,7 @@ out:
 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
 {\
+       free(ff->ph->env.__feat_env);                \
        ff->ph->env.__feat_env = do_read_string(ff); \
        return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
 }
@@ -4124,6 +4125,7 @@ int perf_event__process_feature(struct perf_session *session,
        struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
        int type = fe->header.type;
        u64 feat = fe->feat_id;
+       int ret = 0;
 
        if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
                pr_warning("invalid record type %d in pipe-mode\n", type);
@@ -4141,11 +4143,13 @@ int perf_event__process_feature(struct perf_session *session,
        ff.size = event->header.size - sizeof(*fe);
        ff.ph = &session->header;
 
-       if (feat_ops[feat].process(&ff, NULL))
-               return -1;
+       if (feat_ops[feat].process(&ff, NULL)) {
+               ret = -1;
+               goto out;
+       }
 
        if (!feat_ops[feat].print || !tool->show_feat_hdr)
-               return 0;
+               goto out;
 
        if (!feat_ops[feat].full_only ||
            tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
@@ -4154,8 +4158,9 @@ int perf_event__process_feature(struct perf_session *session,
                fprintf(stdout, "# %s info available, use -I to display\n",
                        feat_ops[feat].name);
        }
-
-       return 0;
+out:
+       free_event_desc(ff.events);
+       return ret;
 }
 
 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
index 5f83937..0e013c2 100644 (file)
@@ -1205,61 +1205,69 @@ out_no_progress:
 
 static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
 {
+       enum intel_pt_sample_type type = decoder->state.type;
        bool ret = false;
 
+       decoder->state.type &= ~INTEL_PT_BRANCH;
+
        if (decoder->set_fup_tx_flags) {
                decoder->set_fup_tx_flags = false;
                decoder->tx_flags = decoder->fup_tx_flags;
-               decoder->state.type = INTEL_PT_TRANSACTION;
+               decoder->state.type |= INTEL_PT_TRANSACTION;
                if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
                        decoder->state.type |= INTEL_PT_BRANCH;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                decoder->state.flags = decoder->fup_tx_flags;
-               return true;
+               ret = true;
        }
        if (decoder->set_fup_ptw) {
                decoder->set_fup_ptw = false;
-               decoder->state.type = INTEL_PT_PTW;
+               decoder->state.type |= INTEL_PT_PTW;
                decoder->state.flags |= INTEL_PT_FUP_IP;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                decoder->state.ptw_payload = decoder->fup_ptw_payload;
-               return true;
+               ret = true;
        }
        if (decoder->set_fup_mwait) {
                decoder->set_fup_mwait = false;
-               decoder->state.type = INTEL_PT_MWAIT_OP;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
+               decoder->state.type |= INTEL_PT_MWAIT_OP;
                decoder->state.mwait_payload = decoder->fup_mwait_payload;
                ret = true;
        }
        if (decoder->set_fup_pwre) {
                decoder->set_fup_pwre = false;
                decoder->state.type |= INTEL_PT_PWR_ENTRY;
-               decoder->state.type &= ~INTEL_PT_BRANCH;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                decoder->state.pwre_payload = decoder->fup_pwre_payload;
                ret = true;
        }
        if (decoder->set_fup_exstop) {
                decoder->set_fup_exstop = false;
                decoder->state.type |= INTEL_PT_EX_STOP;
-               decoder->state.type &= ~INTEL_PT_BRANCH;
                decoder->state.flags |= INTEL_PT_FUP_IP;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                ret = true;
        }
        if (decoder->set_fup_bep) {
                decoder->set_fup_bep = false;
                decoder->state.type |= INTEL_PT_BLK_ITEMS;
-               decoder->state.type &= ~INTEL_PT_BRANCH;
+               ret = true;
+       }
+       if (decoder->overflow) {
+               decoder->overflow = false;
+               if (!ret && !decoder->pge) {
+                       if (decoder->hop) {
+                               decoder->state.type = 0;
+                               decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
+                       }
+                       decoder->pge = true;
+                       decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN;
+                       decoder->state.from_ip = 0;
+                       decoder->state.to_ip = decoder->ip;
+                       return true;
+               }
+       }
+       if (ret) {
                decoder->state.from_ip = decoder->ip;
                decoder->state.to_ip = 0;
-               ret = true;
+       } else {
+               decoder->state.type = type;
        }
        return ret;
 }
@@ -1608,7 +1616,16 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
        intel_pt_clear_tx_flags(decoder);
        intel_pt_set_nr(decoder);
        decoder->timestamp_insn_cnt = 0;
-       decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+       decoder->state.from_ip = decoder->ip;
+       decoder->ip = 0;
+       decoder->pge = false;
+       decoder->set_fup_tx_flags = false;
+       decoder->set_fup_ptw = false;
+       decoder->set_fup_mwait = false;
+       decoder->set_fup_pwre = false;
+       decoder->set_fup_exstop = false;
+       decoder->set_fup_bep = false;
        decoder->overflow = true;
        return -EOVERFLOW;
 }
@@ -2666,6 +2683,8 @@ static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
 /* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
 static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
 {
+       *err = 0;
+
        /* Leap from PSB to PSB, getting ip from FUP within PSB+ */
        if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
                *err = intel_pt_scan_for_psb(decoder);
@@ -2678,6 +2697,7 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
                return HOP_IGNORE;
 
        case INTEL_PT_TIP_PGD:
+               decoder->pge = false;
                if (!decoder->packet.count) {
                        intel_pt_set_nr(decoder);
                        return HOP_IGNORE;
@@ -2705,18 +2725,21 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
                if (!decoder->packet.count)
                        return HOP_IGNORE;
                intel_pt_set_ip(decoder);
-               if (intel_pt_fup_event(decoder))
-                       return HOP_RETURN;
-               if (!decoder->branch_enable)
+               if (decoder->set_fup_mwait || decoder->set_fup_pwre)
+                       *no_tip = true;
+               if (!decoder->branch_enable || !decoder->pge)
                        *no_tip = true;
                if (*no_tip) {
                        decoder->state.type = INTEL_PT_INSTRUCTION;
                        decoder->state.from_ip = decoder->ip;
                        decoder->state.to_ip = 0;
+                       intel_pt_fup_event(decoder);
                        return HOP_RETURN;
                }
+               intel_pt_fup_event(decoder);
+               decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH;
                *err = intel_pt_walk_fup_tip(decoder);
-               if (!*err)
+               if (!*err && decoder->state.to_ip)
                        decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
                return HOP_RETURN;
 
@@ -2897,7 +2920,7 @@ static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
 {
        struct intel_pt_psb_info data = { .fup = false };
 
-       if (!decoder->branch_enable || !decoder->pge)
+       if (!decoder->branch_enable)
                return false;
 
        intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
@@ -2924,6 +2947,7 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
                if (err)
                        return err;
 next:
+               err = 0;
                if (decoder->cyc_threshold) {
                        if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
                                decoder->sample_cyc = false;
@@ -2962,6 +2986,7 @@ next:
 
                case INTEL_PT_TIP_PGE: {
                        decoder->pge = true;
+                       decoder->overflow = false;
                        intel_pt_mtc_cyc_cnt_pge(decoder);
                        intel_pt_set_nr(decoder);
                        if (decoder->packet.count == 0) {
@@ -2999,7 +3024,7 @@ next:
                                break;
                        }
                        intel_pt_set_last_ip(decoder);
-                       if (!decoder->branch_enable) {
+                       if (!decoder->branch_enable || !decoder->pge) {
                                decoder->ip = decoder->last_ip;
                                if (intel_pt_fup_event(decoder))
                                        return 0;
@@ -3467,10 +3492,10 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
        decoder->set_fup_pwre = false;
        decoder->set_fup_exstop = false;
        decoder->set_fup_bep = false;
+       decoder->overflow = false;
 
        if (!decoder->branch_enable) {
                decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
-               decoder->overflow = false;
                decoder->state.type = 0; /* Do not have a sample */
                return 0;
        }
@@ -3485,7 +3510,6 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
                decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
        else
                decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
-       decoder->overflow = false;
 
        decoder->state.from_ip = 0;
        decoder->state.to_ip = decoder->ip;
@@ -3607,7 +3631,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
        }
 
        decoder->have_last_ip = true;
-       decoder->pkt_state = INTEL_PT_STATE_NO_IP;
+       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
 
        err = intel_pt_walk_psb(decoder);
        if (err)
@@ -3704,7 +3728,8 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
 
        if (err) {
                decoder->state.err = intel_pt_ext_err(err);
-               decoder->state.from_ip = decoder->ip;
+               if (err != -EOVERFLOW)
+                       decoder->state.from_ip = decoder->ip;
                intel_pt_update_sample_time(decoder);
                decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
                intel_pt_set_nr(decoder);
index 556a893..e8613cb 100644 (file)
@@ -2565,6 +2565,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
                                ptq->sync_switch = false;
                                intel_pt_next_tid(pt, ptq);
                        }
+                       ptq->timestamp = state->est_timestamp;
                        if (pt->synth_opts.errors) {
                                err = intel_ptq_synth_error(ptq, state);
                                if (err)
@@ -3624,6 +3625,7 @@ static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
                *args = p;
                return 0;
        }
+       p += 1;
        while (1) {
                vmcs = strtoull(p, &p, 0);
                if (errno)
index 5ee47ae..06a7461 100644 (file)
@@ -25,6 +25,9 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
        int i, idx = 0;
        u64 mask = regs->mask;
 
+       if ((u64)id >= PERF_SAMPLE_REGS_CACHE_SIZE)
+               return -EINVAL;
+
        if (regs->cache_mask & (1ULL << id))
                goto out;
 
index 6ae5840..8dfbba1 100644 (file)
@@ -1659,6 +1659,21 @@ bool is_pmu_core(const char *name)
        return !strcmp(name, "cpu") || is_arm_pmu_core(name);
 }
 
+static bool pmu_alias_is_duplicate(struct sevent *alias_a,
+                                  struct sevent *alias_b)
+{
+       /* Different names -> never duplicates */
+       if (strcmp(alias_a->name, alias_b->name))
+               return false;
+
+       /* Don't remove duplicates for hybrid PMUs */
+       if (perf_pmu__is_hybrid(alias_a->pmu) &&
+           perf_pmu__is_hybrid(alias_b->pmu))
+               return false;
+
+       return true;
+}
+
 void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
                        bool long_desc, bool details_flag, bool deprecated,
                        const char *pmu_name)
@@ -1744,12 +1759,8 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
        qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
        for (j = 0; j < len; j++) {
                /* Skip duplicates */
-               if (j > 0 && !strcmp(aliases[j].name, aliases[j - 1].name)) {
-                       if (!aliases[j].pmu || !aliases[j - 1].pmu ||
-                           !strcmp(aliases[j].pmu, aliases[j - 1].pmu)) {
-                               continue;
-                       }
-               }
+               if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
+                       continue;
 
                if (name_only) {
                        printf("%s ", aliases[j].name);
index 563a9ba..7f782a3 100644 (file)
@@ -461,7 +461,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
                struct tep_event *tp_format;
 
                tp_format = trace_event__tp_format_id(evsel->core.attr.config);
-               if (!tp_format)
+               if (IS_ERR_OR_NULL(tp_format))
                        return NULL;
 
                evsel->tp_format = tp_format;
index 20bacd5..34f1b1b 100644 (file)
@@ -15,7 +15,7 @@ int smt_on(void)
        if (cached)
                return cached_result;
 
-       if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) > 0)
+       if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0)
                goto done;
 
        ncpu = sysconf(_SC_NPROCESSORS_CONF);
index 331f6d3..cd71068 100644 (file)
@@ -69,6 +69,7 @@ KERNEL_INCLUDE := $(OUTPUT)include
 ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica
 CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
 CFLAGS += $(WARNINGS)
+MKDIR = mkdir
 
 ifeq ($(strip $(V)),false)
        QUIET=@
index 2a6c170..1d7616f 100644 (file)
@@ -21,6 +21,7 @@ $(KERNEL_INCLUDE):
 
 $(objdir)%.o: %.c $(KERNEL_INCLUDE)
        $(ECHO) "  CC      " $(subst $(OUTPUT),,$@)
+       $(QUIET) $(MKDIR) -p $(objdir) 2>/dev/null
        $(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
 
 all: $(OUTPUT)$(TOOL)
index 5d52ea2..df3b292 100644 (file)
@@ -33,6 +33,22 @@ noinline int bpf_testmod_loop_test(int n)
        return sum;
 }
 
+__weak noinline struct file *bpf_testmod_return_ptr(int arg)
+{
+       static struct file f = {};
+
+       switch (arg) {
+       case 1: return (void *)EINVAL;          /* user addr */
+       case 2: return (void *)0xcafe4a11;      /* user addr */
+       case 3: return (void *)-EINVAL;         /* canonical, but invalid */
+       case 4: return (void *)(1ull << 60);    /* non-canonical and invalid */
+       case 5: return (void *)~(1ull << 30);   /* trigger extable */
+       case 6: return &f;                      /* valid addr */
+       case 7: return (void *)((long)&f | 1);  /* kernel tricks */
+       default: return NULL;
+       }
+}
+
 noinline ssize_t
 bpf_testmod_test_read(struct file *file, struct kobject *kobj,
                      struct bin_attribute *bin_attr,
@@ -43,6 +59,10 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
                .off = off,
                .len = len,
        };
+       int i = 1;
+
+       while (bpf_testmod_return_ptr(i))
+               i++;
 
        /* This is always true. Use the check to make sure the compiler
         * doesn't remove bpf_testmod_loop_test.
index 762f6a9..664ffc0 100644 (file)
@@ -90,7 +90,7 @@ static void print_err_line(void)
 
 static void test_conn(void)
 {
-       int listen_fd = -1, cli_fd = -1, err;
+       int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
        socklen_t addrlen = sizeof(srv_sa6);
        int srv_port;
 
@@ -112,6 +112,10 @@ static void test_conn(void)
        if (CHECK_FAIL(cli_fd == -1))
                goto done;
 
+       srv_fd = accept(listen_fd, NULL, NULL);
+       if (CHECK_FAIL(srv_fd == -1))
+               goto done;
+
        if (CHECK(skel->bss->listen_tp_sport != srv_port ||
                  skel->bss->req_sk_sport != srv_port,
                  "Unexpected sk src port",
@@ -134,11 +138,13 @@ done:
                close(listen_fd);
        if (cli_fd != -1)
                close(cli_fd);
+       if (srv_fd != -1)
+               close(srv_fd);
 }
 
 static void test_syncookie(void)
 {
-       int listen_fd = -1, cli_fd = -1, err;
+       int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
        socklen_t addrlen = sizeof(srv_sa6);
        int srv_port;
 
@@ -161,6 +167,10 @@ static void test_syncookie(void)
        if (CHECK_FAIL(cli_fd == -1))
                goto done;
 
+       srv_fd = accept(listen_fd, NULL, NULL);
+       if (CHECK_FAIL(srv_fd == -1))
+               goto done;
+
        if (CHECK(skel->bss->listen_tp_sport != srv_port,
                  "Unexpected tp src port",
                  "listen_tp_sport:%u expected:%u\n",
@@ -188,6 +198,8 @@ done:
                close(listen_fd);
        if (cli_fd != -1)
                close(cli_fd);
+       if (srv_fd != -1)
+               close(srv_fd);
 }
 
 struct test {
index b368570..50ce16d 100644 (file)
@@ -87,6 +87,18 @@ int BPF_PROG(handle_fexit,
        return 0;
 }
 
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
+{
+       long buf = 0;
+
+       bpf_probe_read_kernel(&buf, 8, ret);
+       bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
+       *(volatile long long *)ret;
+       *(volatile int *)&ret->f_mode;
+       return 0;
+}
+
 __u32 fmod_ret_read_sz = 0;
 
 SEC("fmod_ret/bpf_testmod_test_read")
index 465ef3f..d3bf83d 100644 (file)
@@ -54,7 +54,7 @@
 #define MAX_INSNS      BPF_MAXINSNS
 #define MAX_TEST_INSNS 1000000
 #define MAX_FIXUPS     8
-#define MAX_NR_MAPS    21
+#define MAX_NR_MAPS    22
 #define MAX_TEST_RUNS  8
 #define POINTER_VALUE  0xcafe4all
 #define TEST_DATA_LEN  64
index c22dc83..b39665f 100644 (file)
                BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R0 leaks addr into mem",
 },
 {
        "Dest pointer in r0 - succeed",
                BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+       "Dest pointer in r0 - succeed, check 2",
+       .insns = {
+               /* r0 = &val */
+               BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+               /* val = r0; */
+               BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+               /* r5 = &val */
+               BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+               /* r0 = atomic_cmpxchg(&val, r0, r5); */
+               BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+               /* r1 = *r0 */
+               BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+               /* exit(0); */
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+       "Dest pointer in r0 - succeed, check 3",
+       .insns = {
+               /* r0 = &val */
+               BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+               /* val = r0; */
+               BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+               /* r5 = &val */
+               BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+               /* r0 = atomic_cmpxchg(&val, r0, r5); */
+               BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+               /* exit(0); */
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = REJECT,
+       .errstr = "invalid size of register fill",
+       .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+       "Dest pointer in r0 - succeed, check 4",
+       .insns = {
+               /* r0 = &val */
+               BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
+               /* val = r0; */
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+               /* r5 = &val */
+               BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
+               /* r0 = atomic_cmpxchg(&val, r0, r5); */
+               BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+               /* r1 = *r10 */
+               BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8),
+               /* exit(0); */
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R10 partial copy of pointer",
+},
+{
+       "Dest pointer in r0 - succeed, check 5",
+       .insns = {
+               /* r0 = &val */
+               BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
+               /* val = r0; */
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+               /* r5 = &val */
+               BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
+               /* r0 = atomic_cmpxchg(&val, r0, r5); */
+               BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+               /* r1 = *r0 */
+               BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8),
+               /* exit(0); */
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = REJECT,
+       .errstr = "R0 invalid mem access",
+       .errstr_unpriv = "R10 partial copy of pointer",
 },
index 3bc9ff7..5bf03fb 100644 (file)
@@ -1,3 +1,97 @@
+{
+       "atomic dw/fetch and address leakage of (map ptr & -1) via stack slot",
+       .insns = {
+               BPF_LD_IMM64(BPF_REG_1, -1),
+               BPF_LD_MAP_FD(BPF_REG_8, 0),
+               BPF_LD_MAP_FD(BPF_REG_9, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+               BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
+               BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+               BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 2, 4 },
+       .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "leaking pointer from stack off -8",
+},
+{
+       "atomic dw/fetch and address leakage of (map ptr & -1) via returned value",
+       .insns = {
+               BPF_LD_IMM64(BPF_REG_1, -1),
+               BPF_LD_MAP_FD(BPF_REG_8, 0),
+               BPF_LD_MAP_FD(BPF_REG_9, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+               BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+               BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+               BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+               BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 2, 4 },
+       .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "leaking pointer from stack off -8",
+},
+{
+       "atomic w/fetch and address leakage of (map ptr & -1) via stack slot",
+       .insns = {
+               BPF_LD_IMM64(BPF_REG_1, -1),
+               BPF_LD_MAP_FD(BPF_REG_8, 0),
+               BPF_LD_MAP_FD(BPF_REG_9, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+               BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
+               BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+               BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 2, 4 },
+       .result = REJECT,
+       .errstr = "invalid size of register fill",
+},
+{
+       "atomic w/fetch and address leakage of (map ptr & -1) via returned value",
+       .insns = {
+               BPF_LD_IMM64(BPF_REG_1, -1),
+               BPF_LD_MAP_FD(BPF_REG_8, 0),
+               BPF_LD_MAP_FD(BPF_REG_9, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+               BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+               BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+               BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+               BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 2, 4 },
+       .result = REJECT,
+       .errstr = "invalid size of register fill",
+},
 #define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \
        {                                                               \
                "atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \
index 7e50cb8..6825197 100644 (file)
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_TRACEPOINT,
 },
+{
+       "precision tracking for u32 spill/fill",
+       .insns = {
+               BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_MOV32_IMM(BPF_REG_6, 32),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_MOV32_IMM(BPF_REG_6, 4),
+               /* Additional insns to introduce a pruning point. */
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_MOV64_IMM(BPF_REG_3, 0),
+               BPF_MOV64_IMM(BPF_REG_3, 0),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 0),
+               /* u32 spill/fill */
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8),
+               BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8),
+               /* out-of-bound map value access for r6=32 */
+               BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+               BPF_LD_MAP_FD(BPF_REG_1, 0),
+               BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+               BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+               BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 15 },
+       .result = REJECT,
+       .errstr = "R0 min value is outside of the allowed memory range",
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+       "precision tracking for u32 spills, u64 fill",
+       .insns = {
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+               BPF_MOV32_IMM(BPF_REG_7, 0xffffffff),
+               /* Additional insns to introduce a pruning point. */
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+               /* u32 spills, u64 fill */
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8),
+               /* if r8 != X goto pc+1  r8 known in fallthrough branch */
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               /* if r8 == X goto pc+1  condition always true on first
+                * traversal, so starts backtracking to mark r8 as requiring
+                * precision. r7 marked as needing precision. r6 not marked
+                * since it's not tracked.
+                */
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1),
+               /* fails if r8 correctly marked unknown after fill. */
+               BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = REJECT,
+       .errstr = "div by zero",
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
 {
        "allocated_stack",
        .insns = {
index 7ab3de1..6c90714 100644 (file)
        .errstr = "invalid access to packet",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
+{
+       "Spill u32 const scalars.  Refill as u64.  Offset to skb->data",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct __sk_buff, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct __sk_buff, data_end)),
+       /* r6 = 0 */
+       BPF_MOV32_IMM(BPF_REG_6, 0),
+       /* r7 = 20 */
+       BPF_MOV32_IMM(BPF_REG_7, 20),
+       /* *(u32 *)(r10 -4) = r6 */
+       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+       /* *(u32 *)(r10 -8) = r7 */
+       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+       /* r4 = *(u64 *)(r10 -8) */
+       BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
+       /* r0 = r2 */
+       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+       /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+       /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = REJECT,
+       .errstr = "invalid access to packet",
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
 {
        "Spill a u32 const scalar.  Refill as u16 from fp-6.  Offset to skb->data",
        .insns = {
index 2debba4..359f3e8 100644 (file)
        .errstr = "R0 invalid mem access 'inv'",
        .errstr_unpriv = "R0 pointer -= pointer prohibited",
 },
+{
+       "map access: trying to leak tainted dst reg",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+       BPF_MOV32_IMM(BPF_REG_1, 0xFFFFFFFF),
+       BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+       BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 4 },
+       .result = REJECT,
+       .errstr = "math between map_value pointer and 4294967295 is not allowed",
+},
 {
        "32bit pkt_ptr -= scalar",
        .insns = {
index bfb9738..b4ec228 100644 (file)
@@ -35,7 +35,7 @@
        .prog_type = BPF_PROG_TYPE_XDP,
 },
 {
-       "XDP pkt read, pkt_data' > pkt_end, good access",
+       "XDP pkt read, pkt_data' > pkt_end, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_end > pkt_data', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end > pkt_data', bad access 1",
+       "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_end > pkt_data', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end > pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data' < pkt_end, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+       "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end < pkt_data', good access",
+       "XDP pkt read, pkt_data' < pkt_end, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end < pkt_data', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_end < pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data' >= pkt_end, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+       "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end >= pkt_data', good access",
+       "XDP pkt read, pkt_data' >= pkt_end, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end >= pkt_data', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data' <= pkt_end, good access",
+       "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' <= pkt_end, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_end <= pkt_data', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+       "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' > pkt_data, good access",
+       "XDP pkt read, pkt_end <= pkt_data', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' > pkt_data, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data > pkt_meta', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data > pkt_meta', bad access 1",
+       "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data > pkt_meta', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_meta' < pkt_data, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
+       "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data < pkt_meta', good access",
+       "XDP pkt read, pkt_meta' < pkt_data, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data < pkt_meta', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_meta' >= pkt_data, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
+       "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data >= pkt_meta', good access",
+       "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data >= pkt_meta', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' <= pkt_data, good access",
+       "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data <= pkt_meta', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
+       "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data <= pkt_meta', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
index 623cec0..0cf7e90 100644 (file)
@@ -221,7 +221,7 @@ int cg_find_unified_root(char *root, size_t len)
 
 int cg_create(const char *cgroup)
 {
-       return mkdir(cgroup, 0644);
+       return mkdir(cgroup, 0755);
 }
 
 int cg_wait_for_proc_count(const char *cgroup, int count)
index 3df648c..6001235 100644 (file)
@@ -1,11 +1,14 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 
+#define _GNU_SOURCE
 #include <linux/limits.h>
+#include <linux/sched.h>
 #include <sys/types.h>
 #include <sys/mman.h>
 #include <sys/wait.h>
 #include <unistd.h>
 #include <fcntl.h>
+#include <sched.h>
 #include <stdio.h>
 #include <errno.h>
 #include <signal.h>
@@ -674,6 +677,166 @@ cleanup:
        return ret;
 }
 
+/*
+ * cgroup migration permission check should be performed based on the
+ * credentials at the time of open instead of write.
+ */
+static int test_cgcore_lesser_euid_open(const char *root)
+{
+       const uid_t test_euid = 65534;  /* usually nobody, any !root is fine */
+       int ret = KSFT_FAIL;
+       char *cg_test_a = NULL, *cg_test_b = NULL;
+       char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
+       int cg_test_b_procs_fd = -1;
+       uid_t saved_uid;
+
+       cg_test_a = cg_name(root, "cg_test_a");
+       cg_test_b = cg_name(root, "cg_test_b");
+
+       if (!cg_test_a || !cg_test_b)
+               goto cleanup;
+
+       cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
+       cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");
+
+       if (!cg_test_a_procs || !cg_test_b_procs)
+               goto cleanup;
+
+       if (cg_create(cg_test_a) || cg_create(cg_test_b))
+               goto cleanup;
+
+       if (cg_enter_current(cg_test_a))
+               goto cleanup;
+
+       if (chown(cg_test_a_procs, test_euid, -1) ||
+           chown(cg_test_b_procs, test_euid, -1))
+               goto cleanup;
+
+       saved_uid = geteuid();
+       if (seteuid(test_euid))
+               goto cleanup;
+
+       cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR);
+
+       if (seteuid(saved_uid))
+               goto cleanup;
+
+       if (cg_test_b_procs_fd < 0)
+               goto cleanup;
+
+       if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES)
+               goto cleanup;
+
+       ret = KSFT_PASS;
+
+cleanup:
+       cg_enter_current(root);
+       if (cg_test_b_procs_fd >= 0)
+               close(cg_test_b_procs_fd);
+       if (cg_test_b)
+               cg_destroy(cg_test_b);
+       if (cg_test_a)
+               cg_destroy(cg_test_a);
+       free(cg_test_b_procs);
+       free(cg_test_a_procs);
+       free(cg_test_b);
+       free(cg_test_a);
+       return ret;
+}
+
+struct lesser_ns_open_thread_arg {
+       const char      *path;
+       int             fd;
+       int             err;
+};
+
+static int lesser_ns_open_thread_fn(void *arg)
+{
+       struct lesser_ns_open_thread_arg *targ = arg;
+
+       targ->fd = open(targ->path, O_RDWR);
+       targ->err = errno;
+       return 0;
+}
+
+/*
+ * cgroup migration permission check should be performed based on the cgroup
+ * namespace at the time of open instead of write.
+ */
+static int test_cgcore_lesser_ns_open(const char *root)
+{
+       static char stack[65536];
+       const uid_t test_euid = 65534;  /* usually nobody, any !root is fine */
+       int ret = KSFT_FAIL;
+       char *cg_test_a = NULL, *cg_test_b = NULL;
+       char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
+       int cg_test_b_procs_fd = -1;
+       struct lesser_ns_open_thread_arg targ = { .fd = -1 };
+       pid_t pid;
+       int status;
+
+       cg_test_a = cg_name(root, "cg_test_a");
+       cg_test_b = cg_name(root, "cg_test_b");
+
+       if (!cg_test_a || !cg_test_b)
+               goto cleanup;
+
+       cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
+       cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");
+
+       if (!cg_test_a_procs || !cg_test_b_procs)
+               goto cleanup;
+
+       if (cg_create(cg_test_a) || cg_create(cg_test_b))
+               goto cleanup;
+
+       if (cg_enter_current(cg_test_b))
+               goto cleanup;
+
+       if (chown(cg_test_a_procs, test_euid, -1) ||
+           chown(cg_test_b_procs, test_euid, -1))
+               goto cleanup;
+
+       targ.path = cg_test_b_procs;
+       pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack),
+                   CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD,
+                   &targ);
+       if (pid < 0)
+               goto cleanup;
+
+       if (waitpid(pid, &status, 0) < 0)
+               goto cleanup;
+
+       if (!WIFEXITED(status))
+               goto cleanup;
+
+       cg_test_b_procs_fd = targ.fd;
+       if (cg_test_b_procs_fd < 0)
+               goto cleanup;
+
+       if (cg_enter_current(cg_test_a))
+               goto cleanup;
+
+       if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT)
+               goto cleanup;
+
+       ret = KSFT_PASS;
+
+cleanup:
+       cg_enter_current(root);
+       if (cg_test_b_procs_fd >= 0)
+               close(cg_test_b_procs_fd);
+       if (cg_test_b)
+               cg_destroy(cg_test_b);
+       if (cg_test_a)
+               cg_destroy(cg_test_a);
+       free(cg_test_b_procs);
+       free(cg_test_a_procs);
+       free(cg_test_b);
+       free(cg_test_a);
+       return ret;
+}
+
 #define T(x) { x, #x }
 struct corecg_test {
        int (*fn)(const char *root);
@@ -689,6 +852,8 @@ struct corecg_test {
        T(test_cgcore_proc_migration),
        T(test_cgcore_thread_migration),
        T(test_cgcore_destroy),
+       T(test_cgcore_lesser_euid_open),
+       T(test_cgcore_lesser_ns_open),
 };
 #undef T
 
diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore
new file mode 100644 (file)
index 0000000..c6c2965
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+huge_count_read_write
index 8a3f2cd..937d36a 100644 (file)
@@ -1,7 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for damon selftests
 
-TEST_FILES = _chk_dependency.sh
-TEST_PROGS = debugfs_attrs.sh
+TEST_GEN_FILES += huge_count_read_write
+
+TEST_FILES = _chk_dependency.sh _debugfs_common.sh
+TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
+TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/damon/_debugfs_common.sh b/tools/testing/selftests/damon/_debugfs_common.sh
new file mode 100644 (file)
index 0000000..48989d4
--- /dev/null
@@ -0,0 +1,52 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+test_write_result() {
+       file=$1
+       content=$2
+       orig_content=$3
+       expect_reason=$4
+       expected=$5
+
+       echo "$content" > "$file"
+       if [ $? -ne "$expected" ]
+       then
+               echo "writing $content to $file doesn't return $expected"
+               echo "expected because: $expect_reason"
+               echo "$orig_content" > "$file"
+               exit 1
+       fi
+}
+
+test_write_succ() {
+       test_write_result "$1" "$2" "$3" "$4" 0
+}
+
+test_write_fail() {
+       test_write_result "$1" "$2" "$3" "$4" 1
+}
+
+test_content() {
+       file=$1
+       orig_content=$2
+       expected=$3
+       expect_reason=$4
+
+       content=$(cat "$file")
+       if [ "$content" != "$expected" ]
+       then
+               echo "reading $file expected $expected but $content"
+               echo "expected because: $expect_reason"
+               echo "$orig_content" > "$file"
+               exit 1
+       fi
+}
+
+source ./_chk_dependency.sh
+
+damon_onoff="$DBGFS/monitor_on"
+if [ $(cat "$damon_onoff") = "on" ]
+then
+       echo "monitoring is on"
+       exit $ksft_skip
+fi
index 196b664..902e312 100644 (file)
@@ -1,48 +1,7 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
-test_write_result() {
-       file=$1
-       content=$2
-       orig_content=$3
-       expect_reason=$4
-       expected=$5
-
-       echo "$content" > "$file"
-       if [ $? -ne "$expected" ]
-       then
-               echo "writing $content to $file doesn't return $expected"
-               echo "expected because: $expect_reason"
-               echo "$orig_content" > "$file"
-               exit 1
-       fi
-}
-
-test_write_succ() {
-       test_write_result "$1" "$2" "$3" "$4" 0
-}
-
-test_write_fail() {
-       test_write_result "$1" "$2" "$3" "$4" 1
-}
-
-test_content() {
-       file=$1
-       orig_content=$2
-       expected=$3
-       expect_reason=$4
-
-       content=$(cat "$file")
-       if [ "$content" != "$expected" ]
-       then
-               echo "reading $file expected $expected but $content"
-               echo "expected because: $expect_reason"
-               echo "$orig_content" > "$file"
-               exit 1
-       fi
-}
-
-source ./_chk_dependency.sh
+source _debugfs_common.sh
 
 # Test attrs file
 # ===============
@@ -56,33 +15,3 @@ test_write_fail "$file" "1 2 3 5 4" "$orig_content" \
        "min_nr_regions > max_nr_regions"
 test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written"
 echo "$orig_content" > "$file"
-
-# Test schemes file
-# =================
-
-file="$DBGFS/schemes"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
-       "$orig_content" "valid input"
-test_write_fail "$file" "1 2
-3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
-test_write_succ "$file" "" "$orig_content" "disabling"
-echo "$orig_content" > "$file"
-
-# Test target_ids file
-# ====================
-
-file="$DBGFS/target_ids"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
-test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
-test_content "$file" "$orig_content" "1 2" "non-integer was there"
-test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
-test_content "$file" "$orig_content" "" "wrong input written"
-test_write_succ "$file" "" "$orig_content" "empty input"
-test_content "$file" "$orig_content" "" "empty input written"
-echo "$orig_content" > "$file"
-
-echo "PASS"
diff --git a/tools/testing/selftests/damon/debugfs_empty_targets.sh b/tools/testing/selftests/damon/debugfs_empty_targets.sh
new file mode 100644 (file)
index 0000000..87aff80
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test empty targets case
+# =======================
+
+orig_target_ids=$(cat "$DBGFS/target_ids")
+echo "" > "$DBGFS/target_ids"
+orig_monitor_on=$(cat "$DBGFS/monitor_on")
+test_write_fail "$DBGFS/monitor_on" "on" "orig_monitor_on" "empty target ids"
+echo "$orig_target_ids" > "$DBGFS/target_ids"
diff --git a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
new file mode 100644 (file)
index 0000000..922cada
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test huge count read write
+# ==========================
+
+dmesg -C
+
+for file in "$DBGFS/"*
+do
+       ./huge_count_read_write "$file"
+done
+
+if dmesg | grep -q WARNING
+then
+       dmesg
+       exit 1
+else
+       exit 0
+fi
diff --git a/tools/testing/selftests/damon/debugfs_schemes.sh b/tools/testing/selftests/damon/debugfs_schemes.sh
new file mode 100644 (file)
index 0000000..5b39ab4
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test schemes file
+# =================
+
+file="$DBGFS/schemes"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
+       "$orig_content" "valid input"
+test_write_fail "$file" "1 2
+3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
+test_write_succ "$file" "" "$orig_content" "disabling"
+test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \
+       "$orig_content" "wrong condition ranges"
+echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/debugfs_target_ids.sh b/tools/testing/selftests/damon/debugfs_target_ids.sh
new file mode 100644 (file)
index 0000000..49aeabd
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test target_ids file
+# ====================
+
+file="$DBGFS/target_ids"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
+test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
+test_content "$file" "$orig_content" "1 2" "non-integer was there"
+test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
+test_content "$file" "$orig_content" "" "wrong input written"
+test_write_succ "$file" "" "$orig_content" "empty input"
+test_content "$file" "$orig_content" "" "empty input written"
+echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/huge_count_read_write.c b/tools/testing/selftests/damon/huge_count_read_write.c
new file mode 100644 (file)
index 0000000..ad7a6b4
--- /dev/null
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+
+void write_read_with_huge_count(char *file)
+{
+       int filedesc = open(file, O_RDWR);
+       char buf[25];
+       int ret;
+
+       printf("%s %s\n", __func__, file);
+       if (filedesc < 0) {
+               fprintf(stderr, "failed opening %s\n", file);
+               exit(1);
+       }
+
+       write(filedesc, "", 0xfffffffful);
+       perror("after write: ");
+       ret = read(filedesc, buf, 0xfffffffful);
+       perror("after read: ");
+       close(filedesc);
+}
+
+int main(int argc, char *argv[])
+{
+       if (argc != 2) {
+               fprintf(stderr, "Usage: %s <file>\n", argv[0]);
+               exit(1);
+       }
+       write_read_with_huge_count(argv[1]);
+
+       return 0;
+}
index b513f64..026a126 100755 (executable)
@@ -72,6 +72,35 @@ rif_mac_profile_replacement_test()
        ip link set $h1.10 address $h1_10_mac
 }
 
+rif_mac_profile_consolidation_test()
+{
+       local count=$1; shift
+       local h1_20_mac
+
+       RET=0
+
+       if [[ $count -eq 1 ]]; then
+               return
+       fi
+
+       h1_20_mac=$(mac_get $h1.20)
+
+       # Set the MAC of $h1.20 to that of $h1.10 and confirm that they are
+       # using the same MAC profile.
+       ip link set $h1.20 address 00:11:11:11:11:11
+       check_err $?
+
+       occ=$(devlink -j resource show $DEVLINK_DEV \
+             | jq '.[][][] | select(.name=="rif_mac_profiles") |.["occ"]')
+
+       [[ $occ -eq $((count - 1)) ]]
+       check_err $? "MAC profile occupancy did not decrease"
+
+       log_test "RIF MAC profile consolidation"
+
+       ip link set $h1.20 address $h1_20_mac
+}
+
 rif_mac_profile_shared_replacement_test()
 {
        local count=$1; shift
@@ -104,6 +133,7 @@ rif_mac_profile_edit_test()
        create_max_rif_mac_profiles $count
 
        rif_mac_profile_replacement_test
+       rif_mac_profile_consolidation_test $count
        rif_mac_profile_shared_replacement_test $count
 }
 
index 3763105..3cb5ac5 100644 (file)
 /x86_64/svm_int_ctl_test
 /x86_64/sync_regs_test
 /x86_64/tsc_msrs_test
+/x86_64/userspace_io_test
 /x86_64/userspace_msr_exit_test
 /x86_64/vmx_apic_access_test
 /x86_64/vmx_close_while_nested_test
 /x86_64/vmx_dirty_log_test
+/x86_64/vmx_invalid_nested_guest_state
 /x86_64/vmx_preemption_timer_test
 /x86_64/vmx_set_nested_state_test
 /x86_64/vmx_tsc_adjust_test
index c4e3471..17342b5 100644 (file)
@@ -59,10 +59,12 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test
 TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_invalid_nested_guest_state
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_nested_tsc_scaling_test
index 6a1a37f..2d62edc 100644 (file)
@@ -321,6 +321,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 
 unsigned int vm_get_page_size(struct kvm_vm *vm);
 unsigned int vm_get_page_shift(struct kvm_vm *vm);
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
 uint64_t vm_get_max_gfn(struct kvm_vm *vm);
 int vm_get_fd(struct kvm_vm *vm);
 
index 8f2e0bb..53d2b5d 100644 (file)
@@ -302,7 +302,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
                (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
 
        /* Limit physical addresses to PA-bits. */
-       vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+       vm->max_gfn = vm_compute_max_gfn(vm);
 
        /* Allocate and setup memory for guest. */
        vm->vpages_mapped = sparsebit_alloc();
@@ -2328,6 +2328,11 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm)
        return vm->page_shift;
 }
 
+unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm)
+{
+       return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+}
+
 uint64_t vm_get_max_gfn(struct kvm_vm *vm)
 {
        return vm->max_gfn;
index 82c39db..eef7b34 100644 (file)
@@ -1431,3 +1431,71 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui
 
        return cpuid;
 }
+
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+static inline unsigned x86_family(unsigned int eax)
+{
+        unsigned int x86;
+
+        x86 = (eax >> 8) & 0xf;
+
+        if (x86 == 0xf)
+                x86 += (eax >> 20) & 0xff;
+
+        return x86;
+}
+
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
+{
+       const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
+       unsigned long ht_gfn, max_gfn, max_pfn;
+       uint32_t eax, ebx, ecx, edx, max_ext_leaf;
+
+       max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
+
+       /* Avoid reserved HyperTransport region on AMD processors.  */
+       eax = ecx = 0;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       if (ebx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx ||
+           ecx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx ||
+           edx != X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+               return max_gfn;
+
+       /* On parts with <40 physical address bits, the area is fully hidden */
+       if (vm->pa_bits < 40)
+               return max_gfn;
+
+       /* Before family 17h, the HyperTransport area is just below 1T.  */
+       ht_gfn = (1 << 28) - num_ht_pages;
+       eax = 1;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       if (x86_family(eax) < 0x17)
+               goto done;
+
+       /*
+        * Otherwise it's at the top of the physical address space, possibly
+        * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX.  Use
+        * the old conservative value if MAXPHYADDR is not enumerated.
+        */
+       eax = 0x80000000;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       max_ext_leaf = eax;
+       if (max_ext_leaf < 0x80000008)
+               goto done;
+
+       eax = 0x80000008;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
+       if (max_ext_leaf >= 0x8000001f) {
+               eax = 0x8000001f;
+               cpuid(&eax, &ebx, &ecx, &edx);
+               max_pfn >>= (ebx >> 6) & 0x3f;
+       }
+
+       ht_gfn = max_pfn - num_ht_pages;
+done:
+       return min(max_gfn, ht_gfn - 1);
+}
index df04f56..30a8103 100644 (file)
@@ -75,7 +75,7 @@ static void l1_guest_code(struct svm_test_data *svm)
        vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
 
        /* No intercepts for real and virtual interrupts */
-       vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR);
+       vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR));
 
        /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
        vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
new file mode 100644 (file)
index 0000000..e4bef2e
--- /dev/null
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID                        1
+
+static void guest_ins_port80(uint8_t *buffer, unsigned int count)
+{
+       unsigned long end;
+
+       if (count == 2)
+               end = (unsigned long)buffer + 1;
+       else
+               end = (unsigned long)buffer + 8192;
+
+       asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory");
+       GUEST_ASSERT_1(count == 0, count);
+       GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end);
+}
+
+static void guest_code(void)
+{
+       uint8_t buffer[8192];
+       int i;
+
+       /*
+        * Special case tests.  main() will adjust RCX 2 => 1 and 3 => 8192 to
+        * test that KVM doesn't explode when userspace modifies the "count" on
+        * a userspace I/O exit.  KVM isn't required to play nice with the I/O
+        * itself as KVM doesn't support manipulating the count, it just needs
+        * to not explode or overflow a buffer.
+        */
+       guest_ins_port80(buffer, 2);
+       guest_ins_port80(buffer, 3);
+
+       /* Verify KVM fills the buffer correctly when not stuffing RCX. */
+       memset(buffer, 0, sizeof(buffer));
+       guest_ins_port80(buffer, 8192);
+       for (i = 0; i < 8192; i++)
+               GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]);
+
+       GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+       struct kvm_regs regs;
+       struct kvm_run *run;
+       struct kvm_vm *vm;
+       struct ucall uc;
+       int rc;
+
+       /* Tell stdout not to buffer its content */
+       setbuf(stdout, NULL);
+
+       /* Create VM */
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+       run = vcpu_state(vm, VCPU_ID);
+
+       memset(&regs, 0, sizeof(regs));
+
+       while (1) {
+               rc = _vcpu_run(vm, VCPU_ID);
+
+               TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Unexpected exit reason: %u (%s),\n",
+                           run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               if (get_ucall(vm, VCPU_ID, &uc))
+                       break;
+
+               TEST_ASSERT(run->io.port == 0x80,
+                           "Expected I/O at port 0x80, got port 0x%x\n", run->io.port);
+
+               /*
+                * Modify the rep string count in RCX: 2 => 1 and 3 => 8192.
+                * Note, this abuses KVM's batching of rep string I/O to avoid
+                * getting stuck in an infinite loop.  That behavior isn't in
+                * scope from a testing perspective as it's not ABI in any way,
+                * i.e. it really is abusing internal KVM knowledge.
+                */
+               vcpu_regs_get(vm, VCPU_ID, &regs);
+               if (regs.rcx == 2)
+                       regs.rcx = 1;
+               if (regs.rcx == 3)
+                       regs.rcx = 8192;
+               memset((void *)run + run->io.data_offset, 0xaa, 4096);
+               vcpu_regs_set(vm, VCPU_ID, &regs);
+       }
+
+       switch (uc.cmd) {
+       case UCALL_DONE:
+               break;
+       case UCALL_ABORT:
+               TEST_FAIL("%s at %s:%ld : argN+1 = 0x%lx, argN+2 = 0x%lx",
+                         (const char *)uc.args[0], __FILE__, uc.args[1],
+                         uc.args[2], uc.args[3]);
+       default:
+               TEST_FAIL("Unknown ucall %lu", uc.cmd);
+       }
+
+       kvm_vm_free(vm);
+       return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
new file mode 100644 (file)
index 0000000..489fbed
--- /dev/null
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "kselftest.h"
+
+#define VCPU_ID        0
+#define ARBITRARY_IO_PORT 0x2000
+
+static struct kvm_vm *vm;
+
+static void l2_guest_code(void)
+{
+       /*
+        * Generate an exit to L0 userspace, i.e. main(), via I/O to an
+        * arbitrary port.
+        */
+       asm volatile("inb %%dx, %%al"
+                    : : [port] "d" (ARBITRARY_IO_PORT) : "rax");
+}
+
+static void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+       GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+       GUEST_ASSERT(load_vmcs(vmx_pages));
+
+       /* Prepare the VMCS for L2 execution. */
+       prepare_vmcs(vmx_pages, l2_guest_code,
+                    &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+       /*
+        * L2 must be run without unrestricted guest, verify that the selftests
+        * library hasn't enabled it.  Because KVM selftests jump directly to
+        * 64-bit mode, unrestricted guest support isn't required.
+        */
+       GUEST_ASSERT(!(vmreadz(CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ||
+                    !(vmreadz(SECONDARY_VM_EXEC_CONTROL) & SECONDARY_EXEC_UNRESTRICTED_GUEST));
+
+       GUEST_ASSERT(!vmlaunch());
+
+       /* L2 should triple fault after main() stuffs invalid guest state. */
+       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
+       GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+       vm_vaddr_t vmx_pages_gva;
+       struct kvm_sregs sregs;
+       struct kvm_run *run;
+       struct ucall uc;
+
+       nested_vmx_check_supported();
+
+       vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+
+       /* Allocate VMX pages and shared descriptors (vmx_pages). */
+       vcpu_alloc_vmx(vm, &vmx_pages_gva);
+       vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+
+       vcpu_run(vm, VCPU_ID);
+
+       run = vcpu_state(vm, VCPU_ID);
+
+       /*
+        * The first exit to L0 userspace should be an I/O access from L2.
+        * Running L1 should launch L2 without triggering an exit to userspace.
+        */
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                   "Expected KVM_EXIT_IO, got: %u (%s)\n",
+                   run->exit_reason, exit_reason_str(run->exit_reason));
+
+       TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
+                   "Expected IN from port %d from L2, got port %d",
+                   ARBITRARY_IO_PORT, run->io.port);
+
+       /*
+        * Stuff invalid guest state for L2 by making TR unusuable.  The next
+        * KVM_RUN should induce a TRIPLE_FAULT in L2 as KVM doesn't support
+        * emulating invalid guest state for L2.
+        */
+       memset(&sregs, 0, sizeof(sregs));
+       vcpu_sregs_get(vm, VCPU_ID, &sregs);
+       sregs.tr.unusable = 1;
+       vcpu_sregs_set(vm, VCPU_ID, &sregs);
+
+       vcpu_run(vm, VCPU_ID);
+
+       switch (get_ucall(vm, VCPU_ID, &uc)) {
+       case UCALL_DONE:
+               break;
+       case UCALL_ABORT:
+               TEST_FAIL("%s", (const char *)uc.args[0]);
+       default:
+               TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+       }
+}
index 23051d8..2454a1f 100644 (file)
@@ -110,22 +110,5 @@ int main(int argc, char *argv[])
        ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT);
        TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
 
-       /* testcase 4, set capabilities when we don't have PDCM bit */
-       entry_1_0->ecx &= ~X86_FEATURE_PDCM;
-       vcpu_set_cpuid(vm, VCPU_ID, cpuid);
-       ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
-       TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
-
-       /* testcase 5, set capabilities when we don't have PMU version bits */
-       entry_1_0->ecx |= X86_FEATURE_PDCM;
-       eax.split.version_id = 0;
-       entry_1_0->ecx = eax.full;
-       vcpu_set_cpuid(vm, VCPU_ID, cpuid);
-       ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
-       TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
-
-       vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0);
-       ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0);
-
        kvm_vm_free(vm);
 }
old mode 100644 (file)
new mode 100755 (executable)
index 7f5b265..ad2982b 100755 (executable)
@@ -455,6 +455,22 @@ cleanup()
        ip netns del ${NSC} >/dev/null 2>&1
 }
 
+cleanup_vrf_dup()
+{
+       ip link del ${NSA_DEV2} >/dev/null 2>&1
+       ip netns pids ${NSC} | xargs kill 2>/dev/null
+       ip netns del ${NSC} >/dev/null 2>&1
+}
+
+setup_vrf_dup()
+{
+       # some VRF tests use ns-C which has the same config as
+       # ns-B but for a device NOT in the VRF
+       create_ns ${NSC} "-" "-"
+       connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \
+                  ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64
+}
+
 setup()
 {
        local with_vrf=${1}
@@ -484,12 +500,6 @@ setup()
 
                ip -netns ${NSB} ro add ${VRF_IP}/32 via ${NSA_IP} dev ${NSB_DEV}
                ip -netns ${NSB} -6 ro add ${VRF_IP6}/128 via ${NSA_IP6} dev ${NSB_DEV}
-
-               # some VRF tests use ns-C which has the same config as
-               # ns-B but for a device NOT in the VRF
-               create_ns ${NSC} "-" "-"
-               connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \
-                          ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64
        else
                ip -netns ${NSA} ro add ${NSB_LO_IP}/32 via ${NSB_IP} dev ${NSA_DEV}
                ip -netns ${NSA} ro add ${NSB_LO_IP6}/128 via ${NSB_IP6} dev ${NSA_DEV}
@@ -1240,7 +1250,9 @@ ipv4_tcp_vrf()
        log_test_addr ${a} $? 1 "Global server, local connection"
 
        # run MD5 tests
+       setup_vrf_dup
        ipv4_tcp_md5
+       cleanup_vrf_dup
 
        #
        # enable VRF global server
@@ -1798,8 +1810,9 @@ ipv4_addr_bind_vrf()
        for a in ${NSA_IP} ${VRF_IP}
        do
                log_start
+               show_hint "Socket not bound to VRF, but address is in VRF"
                run_cmd nettest -s -R -P icmp -l ${a} -b
-               log_test_addr ${a} $? 0 "Raw socket bind to local address"
+               log_test_addr ${a} $? 1 "Raw socket bind to local address"
 
                log_start
                run_cmd nettest -s -R -P icmp -l ${a} -I ${NSA_DEV} -b
@@ -2191,7 +2204,7 @@ ipv6_ping_vrf()
                log_start
                show_hint "Fails since VRF device does not support linklocal or multicast"
                run_cmd ${ping6} -c1 -w1 ${a}
-               log_test_addr ${a} $? 2 "ping out, VRF bind"
+               log_test_addr ${a} $? 1 "ping out, VRF bind"
        done
 
        for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV}
@@ -2719,7 +2732,9 @@ ipv6_tcp_vrf()
        log_test_addr ${a} $? 1 "Global server, local connection"
 
        # run MD5 tests
+       setup_vrf_dup
        ipv6_tcp_md5
+       cleanup_vrf_dup
 
        #
        # enable VRF global server
@@ -3414,11 +3429,14 @@ ipv6_addr_bind_novrf()
        run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
        log_test_addr ${a} $? 0 "TCP socket bind to local address after device bind"
 
+       # Sadly, the kernel allows binding a socket to a device and then
+       # binding to an address not on the device. So this test passes
+       # when it really should not
        a=${NSA_LO_IP6}
        log_start
-       show_hint "Should fail with 'Cannot assign requested address'"
+       show_hint "Tecnically should fail since address is not on device but kernel allows"
        run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
-       log_test_addr ${a} $? 1 "TCP socket bind to out of scope local address"
+       log_test_addr ${a} $? 0 "TCP socket bind to out of scope local address"
 }
 
 ipv6_addr_bind_vrf()
@@ -3459,10 +3477,15 @@ ipv6_addr_bind_vrf()
        run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
        log_test_addr ${a} $? 0 "TCP socket bind to local address with device bind"
 
+       # Sadly, the kernel allows binding a socket to a device and then
+       # binding to an address not on the device. The only restriction
+       # is that the address is valid in the L3 domain. So this test
+       # passes when it really should not
        a=${VRF_IP6}
        log_start
+       show_hint "Tecnically should fail since address is not on device but kernel allows"
        run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
-       log_test_addr ${a} $? 1 "TCP socket bind to VRF address with device bind"
+       log_test_addr ${a} $? 0 "TCP socket bind to VRF address with device bind"
 
        a=${NSA_LO_IP6}
        log_start
@@ -4077,3 +4100,11 @@ cleanup 2>/dev/null
 
 printf "\nTests passed: %3d\n" ${nsuccess}
 printf "Tests failed: %3d\n"   ${nfail}
+
+if [ $nfail -ne 0 ]; then
+       exit 1 # KSFT_FAIL
+elif [ $nsuccess -eq 0 ]; then
+       exit $ksft_skip
+fi
+
+exit 0 # KSFT_PASS
index 5abe92d..996af1a 100755 (executable)
@@ -444,24 +444,63 @@ fib_rp_filter_test()
        setup
 
        set -e
+       ip netns add ns2
+       ip netns set ns2 auto
+
+       ip -netns ns2 link set dev lo up
+
+       $IP link add name veth1 type veth peer name veth2
+       $IP link set dev veth2 netns ns2
+       $IP address add 192.0.2.1/24 dev veth1
+       ip -netns ns2 address add 192.0.2.1/24 dev veth2
+       $IP link set dev veth1 up
+       ip -netns ns2 link set dev veth2 up
+
        $IP link set dev lo address 52:54:00:6a:c7:5e
-       $IP link set dummy0 address 52:54:00:6a:c7:5e
-       $IP link add dummy1 type dummy
-       $IP link set dummy1 address 52:54:00:6a:c7:5e
-       $IP link set dev dummy1 up
+       $IP link set dev veth1 address 52:54:00:6a:c7:5e
+       ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e
+       ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e
+
+       # 1. (ns2) redirect lo's egress to veth2's egress
+       ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel
+       ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \
+               action mirred egress redirect dev veth2
+       ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \
+               action mirred egress redirect dev veth2
+
+       # 2. (ns1) redirect veth1's ingress to lo's ingress
+       $NS_EXEC tc qdisc add dev veth1 ingress
+       $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \
+               action mirred ingress redirect dev lo
+       $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \
+               action mirred ingress redirect dev lo
+
+       # 3. (ns1) redirect lo's egress to veth1's egress
+       $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel
+       $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \
+               action mirred egress redirect dev veth1
+       $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \
+               action mirred egress redirect dev veth1
+
+       # 4. (ns2) redirect veth2's ingress to lo's ingress
+       ip netns exec ns2 tc qdisc add dev veth2 ingress
+       ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \
+               action mirred ingress redirect dev lo
+       ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \
+               action mirred ingress redirect dev lo
+
        $NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1
        $NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1
        $NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1
-
-       $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel
-       $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo
-       $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo
+       ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1
+       ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1
+       ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1
        set +e
 
-       run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1"
+       run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1"
        log_test $? 0 "rp_filter passes local packets"
 
-       run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1"
+       run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1"
        log_test $? 0 "rp_filter passes loopback packets"
 
        cleanup
index bf17e48..b0980a2 100644 (file)
@@ -13,6 +13,8 @@ NETIFS[p5]=veth4
 NETIFS[p6]=veth5
 NETIFS[p7]=veth6
 NETIFS[p8]=veth7
+NETIFS[p9]=veth8
+NETIFS[p10]=veth9
 
 # Port that does not have a cable connected.
 NETIF_NO_CABLE=eth8
index ecbf57f..7b9d6e3 100755 (executable)
@@ -311,7 +311,7 @@ check_exception()
                ip -netns h1 ro get ${H1_VRF_ARG} ${H2_N2_IP} | \
                grep -E -v 'mtu|redirected' | grep -q "cache"
        fi
-       log_test $? 0 "IPv4: ${desc}"
+       log_test $? 0 "IPv4: ${desc}" 0
 
        # No PMTU info for test "redirect" and "mtu exception plus redirect"
        if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then
index 0faaccd..2b82628 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_ADVANCED=y
 CONFIG_NETFILTER_NETLINK=m
 CONFIG_NF_TABLES=m
-CONFIG_NFT_COUNTER=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XTABLES=m
 CONFIG_NETFILTER_XT_MATCH_BPF=m
index 8a22db0..6e468e0 100644 (file)
@@ -31,6 +31,8 @@ struct tls_crypto_info_keys {
                struct tls12_crypto_info_chacha20_poly1305 chacha20;
                struct tls12_crypto_info_sm4_gcm sm4gcm;
                struct tls12_crypto_info_sm4_ccm sm4ccm;
+               struct tls12_crypto_info_aes_ccm_128 aesccm128;
+               struct tls12_crypto_info_aes_gcm_256 aesgcm256;
        };
        size_t len;
 };
@@ -61,6 +63,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
                tls12->sm4ccm.info.version = tls_version;
                tls12->sm4ccm.info.cipher_type = cipher_type;
                break;
+       case TLS_CIPHER_AES_CCM_128:
+               tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128);
+               tls12->aesccm128.info.version = tls_version;
+               tls12->aesccm128.info.cipher_type = cipher_type;
+               break;
+       case TLS_CIPHER_AES_GCM_256:
+               tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256);
+               tls12->aesgcm256.info.version = tls_version;
+               tls12->aesgcm256.info.cipher_type = cipher_type;
+               break;
        default:
                break;
        }
@@ -261,6 +273,30 @@ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm)
        .cipher_type = TLS_CIPHER_SM4_CCM,
 };
 
+FIXTURE_VARIANT_ADD(tls, 12_aes_ccm)
+{
+       .tls_version = TLS_1_2_VERSION,
+       .cipher_type = TLS_CIPHER_AES_CCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_aes_ccm)
+{
+       .tls_version = TLS_1_3_VERSION,
+       .cipher_type = TLS_CIPHER_AES_CCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256)
+{
+       .tls_version = TLS_1_2_VERSION,
+       .cipher_type = TLS_CIPHER_AES_GCM_256,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256)
+{
+       .tls_version = TLS_1_3_VERSION,
+       .cipher_type = TLS_CIPHER_AES_GCM_256,
+};
+
 FIXTURE_SETUP(tls)
 {
        struct tls_crypto_info_keys tls12;
index 710ac95..c548934 100644 (file)
@@ -498,7 +498,7 @@ static void parse_opts(int argc, char **argv)
        bool have_toeplitz = false;
        int index, c;
 
-       while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:u:v", long_options, &index)) != -1) {
+       while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:uv", long_options, &index)) != -1) {
                switch (c) {
                case '4':
                        cfg_family = AF_INET;
index 7f26591..6f05e06 100755 (executable)
@@ -132,7 +132,7 @@ run_test() {
        local rcv=`ip netns exec $NS_DST $ipt"-save" -c | grep 'dport 8000' | \
                                                          sed -e 's/\[//' -e 's/:.*//'`
        if [ $rcv != $pkts ]; then
-               echo " fail - received $rvs packets, expected $pkts"
+               echo " fail - received $rcv packets, expected $pkts"
                ret=1
                return
        fi
@@ -185,6 +185,7 @@ for family in 4 6; do
        IPT=iptables
        SUFFIX=24
        VXDEV=vxlan
+       PING=ping
 
        if [ $family = 6 ]; then
                BM_NET=$BM_NET_V6
@@ -192,6 +193,8 @@ for family in 4 6; do
                SUFFIX="64 nodad"
                VXDEV=vxlan6
                IPT=ip6tables
+               # Use ping6 on systems where ping doesn't handle IPv6
+               ping -w 1 -c 1 ::1 > /dev/null 2>&1 || PING="ping6"
        fi
 
        echo "IPv$family"
@@ -237,7 +240,7 @@ for family in 4 6; do
 
        # load arp cache before running the test to reduce the amount of
        # stray traffic on top of the UDP tunnel
-       ip netns exec $NS_SRC ping -q -c 1 $OL_NET$DST_NAT >/dev/null
+       ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
        run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
        cleanup
 
index c66da6f..7badaf2 100644 (file)
@@ -156,13 +156,13 @@ struct testcase testcases_v4[] = {
        },
        {
                /* send max number of min sized segments */
-               .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
+               .tlen = UDP_MAX_SEGMENTS,
                .gso_len = 1,
-               .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
+               .r_num_mss = UDP_MAX_SEGMENTS,
        },
        {
                /* send max number + 1 of min sized segments: fail */
-               .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4 + 1,
+               .tlen = UDP_MAX_SEGMENTS + 1,
                .gso_len = 1,
                .tfail = true,
        },
@@ -259,13 +259,13 @@ struct testcase testcases_v6[] = {
        },
        {
                /* send max number of min sized segments */
-               .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
+               .tlen = UDP_MAX_SEGMENTS,
                .gso_len = 1,
-               .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
+               .r_num_mss = UDP_MAX_SEGMENTS,
        },
        {
                /* send max number + 1 of min sized segments: fail */
-               .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6 + 1,
+               .tlen = UDP_MAX_SEGMENTS + 1,
                .gso_len = 1,
                .tfail = true,
        },
index 17512a4..f1fdaa2 100644 (file)
@@ -419,6 +419,7 @@ static void usage(const char *filepath)
 
 static void parse_opts(int argc, char **argv)
 {
+       const char *bind_addr = NULL;
        int max_len, hdrlen;
        int c;
 
@@ -446,7 +447,7 @@ static void parse_opts(int argc, char **argv)
                        cfg_cpu = strtol(optarg, NULL, 0);
                        break;
                case 'D':
-                       setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
+                       bind_addr = optarg;
                        break;
                case 'l':
                        cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
@@ -492,6 +493,11 @@ static void parse_opts(int argc, char **argv)
                }
        }
 
+       if (!bind_addr)
+               bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0";
+
+       setup_sockaddr(cfg_family, bind_addr, &cfg_dst_addr);
+
        if (optind != argc)
                usage(argv[0]);
 
index 91f3ef0..8b5ea92 100755 (executable)
@@ -150,11 +150,27 @@ EOF
 # oifname is the vrf device.
 test_masquerade_vrf()
 {
+       local qdisc=$1
+
+       if [ "$qdisc" != "default" ]; then
+               tc -net $ns0 qdisc add dev tvrf root $qdisc
+       fi
+
        ip netns exec $ns0 conntrack -F 2>/dev/null
 
 ip netns exec $ns0 nft -f - <<EOF
 flush ruleset
 table ip nat {
+       chain rawout {
+               type filter hook output priority raw;
+
+               oif tvrf ct state untracked counter
+       }
+       chain postrouting2 {
+               type filter hook postrouting priority mangle;
+
+               oif tvrf ct state untracked counter
+       }
        chain postrouting {
                type nat hook postrouting priority 0;
                # NB: masquerade should always be combined with 'oif(name) bla',
@@ -171,13 +187,18 @@ EOF
        fi
 
        # must also check that nat table was evaluated on second (lower device) iteration.
-       ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2'
+       ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' &&
+       ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]'
        if [ $? -eq 0 ]; then
-               echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device"
+               echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)"
        else
-               echo "FAIL: vrf masq rule has unexpected counter value"
+               echo "FAIL: vrf rules have unexpected counter value"
                ret=1
        fi
+
+       if [ "$qdisc" != "default" ]; then
+               tc -net $ns0 qdisc del dev tvrf root
+       fi
 }
 
 # add masq rule that gets evaluated w. outif set to veth device.
@@ -213,7 +234,8 @@ EOF
 }
 
 test_ct_zone_in
-test_masquerade_vrf
+test_masquerade_vrf "default"
+test_masquerade_vrf "pfifo"
 test_masquerade_veth
 
 exit $ret
index 5a4938d..ed61f6c 100755 (executable)
@@ -23,8 +23,8 @@ TESTS="reported_issues correctness concurrency timeout"
 
 # Set types, defined by TYPE_ variables below
 TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
-       net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port
-       net_port_mac_proto_net"
+       net_port_net net_mac mac_net net_mac_icmp net6_mac_icmp
+       net6_port_net6_port net_port_mac_proto_net"
 
 # Reported bugs, also described by TYPE_ variables below
 BUGS="flush_remove_add"
@@ -277,6 +277,23 @@ perf_entries       1000
 perf_proto     ipv4
 "
 
+TYPE_mac_net="
+display                mac,net
+type_spec      ether_addr . ipv4_addr
+chain_spec     ether saddr . ip saddr
+dst             
+src            mac addr4
+start          1
+count          5
+src_delta      2000
+tools          sendip nc bash
+proto          udp
+
+race_repeat    0
+
+perf_duration  0
+"
+
 TYPE_net_mac_icmp="
 display                net,mac - ICMP
 type_spec      ipv4_addr . ether_addr
@@ -984,7 +1001,8 @@ format() {
                fi
        done
        for f in ${src}; do
-               __expr="${__expr} . "
+               [ "${__expr}" != "{ " ] && __expr="${__expr} . "
+
                __start="$(eval format_"${f}" "${srcstart}")"
                __end="$(eval format_"${f}" "${srcend}")"
 
index ac64637..0463311 100755 (executable)
@@ -18,11 +18,17 @@ cleanup()
        ip netns del $ns
 }
 
-ip netns add $ns
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not create net namespace $gw"
-       exit $ksft_skip
-fi
+checktool (){
+       if ! $1 > /dev/null 2>&1; then
+               echo "SKIP: Could not $2"
+               exit $ksft_skip
+       fi
+}
+
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "socat -V" "run test without socat tool"
+checktool "ip netns add $ns" "create net namespace"
 
 trap cleanup EXIT
 
@@ -71,7 +77,8 @@ EOF
                local start=$(date +%s%3N)
                i=$((i + 10000))
                j=$((j + 1))
-               dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
+               # nft rule in output places each packet in a different zone.
+               dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345
                if [ $? -ne 0 ] ;then
                        ret=1
                        break
index b71828d..a3239d5 100644 (file)
@@ -60,6 +60,8 @@ CONFIG_NET_IFE_SKBTCINDEX=m
 CONFIG_NET_SCH_FIFO=y
 CONFIG_NET_SCH_ETS=m
 CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_FQ_PIE=m
+CONFIG_NETDEVSIM=m
 
 #
 ## Network testing
index a3e4318..ee22e34 100755 (executable)
@@ -716,6 +716,7 @@ def set_operation_mode(pm, parser, args, remaining):
         list_test_cases(alltests)
         exit(0)
 
+    exit_code = 0 # KSFT_PASS
     if len(alltests):
         req_plugins = pm.get_required_plugins(alltests)
         try:
@@ -724,6 +725,8 @@ def set_operation_mode(pm, parser, args, remaining):
             print('The following plugins were not found:')
             print('{}'.format(pde.missing_pg))
         catresults = test_runner(pm, args, alltests)
+        if catresults.count_failures() != 0:
+            exit_code = 1 # KSFT_FAIL
         if args.format == 'none':
             print('Test results output suppression requested\n')
         else:
@@ -748,6 +751,8 @@ def set_operation_mode(pm, parser, args, remaining):
                         gid=int(os.getenv('SUDO_GID')))
     else:
         print('No tests found\n')
+        exit_code = 4 # KSFT_SKIP
+    exit(exit_code)
 
 def main():
     """
@@ -767,8 +772,5 @@ def main():
 
     set_operation_mode(pm, parser, args, remaining)
 
-    exit(0)
-
-
 if __name__ == "__main__":
     main()
index 7fe38c7..afb0cd8 100755 (executable)
@@ -1,5 +1,6 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
+modprobe netdevsim
 ./tdc.py -c actions --nobuildebpf
 ./tdc.py -c qdisc
index 8a09057..9354a5e 100644 (file)
@@ -87,7 +87,7 @@ static bool test_uffdio_minor = false;
 
 static bool map_shared;
 static int shm_fd;
-static int huge_fd;
+static int huge_fd = -1;       /* only used for hugetlb_shared test */
 static char *huge_fd_off0;
 static unsigned long long *count_verify;
 static int uffd = -1;
@@ -223,6 +223,9 @@ static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
 
 static void hugetlb_release_pages(char *rel_area)
 {
+       if (huge_fd == -1)
+               return;
+
        if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                      rel_area == huge_fd_off0 ? 0 : nr_pages * page_size,
                      nr_pages * page_size))
@@ -235,16 +238,17 @@ static void hugetlb_allocate_area(void **alloc_area)
        char **alloc_area_alias;
 
        *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
-                          (map_shared ? MAP_SHARED : MAP_PRIVATE) |
-                          MAP_HUGETLB,
-                          huge_fd, *alloc_area == area_src ? 0 :
-                          nr_pages * page_size);
+                          map_shared ? MAP_SHARED :
+                          MAP_PRIVATE | MAP_HUGETLB |
+                          (*alloc_area == area_src ? 0 : MAP_NORESERVE),
+                          huge_fd,
+                          *alloc_area == area_src ? 0 : nr_pages * page_size);
        if (*alloc_area == MAP_FAILED)
                err("mmap of hugetlbfs file failed");
 
        if (map_shared) {
                area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
-                                 MAP_SHARED | MAP_HUGETLB,
+                                 MAP_SHARED,
                                  huge_fd, *alloc_area == area_src ? 0 :
                                  nr_pages * page_size);
                if (area_alias == MAP_FAILED)