Merge tag 'arm64-spectre-bhb-for-v5.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 8 Mar 2022 17:27:25 +0000 (09:27 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 8 Mar 2022 17:27:25 +0000 (09:27 -0800)
Pull arm64 spectre fixes from James Morse:
 "ARM64 Spectre-BHB mitigations:

   - Make EL1 vectors per-cpu

   - Add mitigation sequences to the EL1 and EL2 vectors on vulnerable
     CPUs

   - Implement ARCH_WORKAROUND_3 for KVM guests

   - Report Vulnerable when unprivileged eBPF is enabled"

* tag 'arm64-spectre-bhb-for-v5.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: proton-pack: Include unprivileged eBPF status in Spectre v2 mitigation reporting
  arm64: Use the clearbhb instruction in mitigations
  KVM: arm64: Allow SMCCC_ARCH_WORKAROUND_3 to be discovered and migrated
  arm64: Mitigate spectre style branch history side channels
  arm64: proton-pack: Report Spectre-BHB vulnerabilities as part of Spectre-v2
  arm64: Add percpu vectors for EL1
  arm64: entry: Add macro for reading symbol addresses from the trampoline
  arm64: entry: Add vectors that have the bhb mitigation sequences
  arm64: entry: Add non-kpti __bp_harden_el1_vectors for mitigations
  arm64: entry: Allow the trampoline text to occupy multiple pages
  arm64: entry: Make the kpti trampoline's kpti sequence optional
  arm64: entry: Move trampoline macros out of ifdef'd section
  arm64: entry: Don't assume tramp_vectors is the start of the vectors
  arm64: entry: Allow tramp_alias to access symbols after the 4K boundary
  arm64: entry: Move the trampoline data page before the text page
  arm64: entry: Free up another register on kpti's tramp_exit path
  arm64: entry: Make the trampoline cleanup optional
  KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A
  arm64: spectre: Rename spectre_v4_patch_fw_mitigation_conduit
  arm64: entry.S: Add ventry overflow sanity checks

983 files changed:
.mailmap
CREDITS
Documentation/ABI/testing/sysfs-class-power
Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing
Documentation/admin-guide/hw-vuln/spectre.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/mm/pagemap.rst
Documentation/cpu-freq/cpu-drivers.rst
Documentation/dev-tools/kunit/usage.rst
Documentation/devicetree/bindings/arm/atmel-at91.yaml
Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-dcfg.txt
Documentation/devicetree/bindings/arm/omap/omap.txt
Documentation/devicetree/bindings/clock/qoriq-clock.txt
Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml
Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
Documentation/devicetree/bindings/usb/dwc2.yaml
Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
Documentation/tools/rtla/common_hist_options.rst
Documentation/tools/rtla/common_osnoise_description.rst
Documentation/tools/rtla/rtla-osnoise-hist.rst
Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-wega.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx23-evk.dts
arch/arm/boot/dts/imx6qdl-udoo.dtsi
arch/arm/boot/dts/imx7ulp.dtsi
arch/arm/boot/dts/meson.dtsi
arch/arm/boot/dts/meson8.dtsi
arch/arm/boot/dts/meson8b.dtsi
arch/arm/boot/dts/omap3-beagle-ab4.dts [new file with mode: 0644]
arch/arm/boot/dts/omap3-beagle.dts
arch/arm/boot/dts/omap3-devkit8000-common.dtsi
arch/arm/boot/dts/omap3-devkit8000.dts
arch/arm/boot/dts/rk322x.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/spear320-hmi.dts
arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
arch/arm/boot/dts/tegra124-nyan-big.dts
arch/arm/boot/dts/tegra124-nyan-blaze.dts
arch/arm/boot/dts/tegra124-venice2.dts
arch/arm/include/asm/assembler.h
arch/arm/include/asm/spectre.h [new file with mode: 0644]
arch/arm/include/asm/vmlinux.lds.h
arch/arm/kernel/Makefile
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/kgdb.c
arch/arm/kernel/spectre.c [new file with mode: 0644]
arch/arm/kernel/traps.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-socfpga/Kconfig
arch/arm/mm/Kconfig
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7-bugs.c
arch/arm64/Kconfig
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
arch/arm64/boot/dts/arm/juno-base.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
arch/arm64/boot/dts/freescale/imx8mm.dtsi
arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
arch/arm64/boot/dts/freescale/imx8mq.dtsi
arch/arm64/boot/dts/freescale/imx8ulp.dtsi
arch/arm64/boot/dts/freescale/mba8mx.dtsi
arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
arch/arm64/boot/dts/rockchip/px30.dtsi
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
arch/arm64/boot/dts/rockchip/rk3568.dtsi
arch/arm64/boot/dts/rockchip/rk356x.dtsi
arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
arch/arm64/boot/dts/ti/k3-j721s2.dtsi
arch/arm64/include/asm/el2_setup.h
arch/arm64/kvm/psci.c
arch/arm64/kvm/vgic/vgic-mmio.c
arch/mips/boot/dts/ingenic/ci20.dts
arch/mips/kernel/setup.c
arch/mips/kernel/smp.c
arch/mips/ralink/mt7621.c
arch/parisc/include/asm/bitops.h
arch/parisc/include/asm/uaccess.h
arch/parisc/kernel/unaligned.c
arch/parisc/lib/iomap.c
arch/parisc/mm/init.c
arch/powerpc/include/asm/book3s/64/mmu.h
arch/powerpc/include/asm/kexec_ranges.h
arch/powerpc/kernel/head_book3s_32.S
arch/powerpc/lib/sstep.c
arch/riscv/Makefile
arch/riscv/boot/dts/canaan/k210.dtsi
arch/riscv/configs/nommu_k210_sdcard_defconfig
arch/riscv/include/asm/page.h
arch/riscv/include/asm/pgtable.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/cpu-hotplug.c
arch/riscv/kernel/entry.S
arch/riscv/kernel/head.S
arch/riscv/kernel/sbi.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/trace_irq.c [new file with mode: 0644]
arch/riscv/kernel/trace_irq.h [new file with mode: 0644]
arch/riscv/mm/Makefile
arch/riscv/mm/extable.c
arch/riscv/mm/init.c
arch/riscv/mm/kasan_init.c
arch/riscv/mm/physaddr.c
arch/s390/include/asm/extable.h
arch/s390/include/asm/ftrace.h
arch/s390/include/asm/ptrace.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/mcount.S
arch/s390/kernel/setup.c
arch/s390/kvm/kvm-s390.c
arch/s390/lib/test_modules.c
arch/s390/lib/test_modules.h
arch/x86/include/asm/bug.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/svm.h
arch/x86/include/asm/xen/cpuid.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/sgx/encl.c
arch/x86/kernel/cpu/sgx/main.c
arch/x86/kernel/fpu/regset.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/kvm.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/resource.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/pmu.c
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
arch/x86/lib/retpoline.S
arch/x86/net/bpf_jit_comp.c
arch/x86/xen/enlighten_hvm.c
arch/x86/xen/vga.c
block/bfq-iosched.c
block/blk-core.c
block/blk-map.c
block/blk-mq.c
block/elevator.c
block/fops.c
block/genhd.c
crypto/af_alg.c
crypto/algapi.c
crypto/api.c
drivers/accessibility/speakup/speakup_dectlk.c
drivers/acpi/arm64/iort.c
drivers/acpi/ec.c
drivers/acpi/processor_idle.c
drivers/acpi/sleep.c
drivers/acpi/tables.c
drivers/acpi/x86/s2idle.c
drivers/ata/libata-core.c
drivers/ata/pata_hpt37x.c
drivers/ata/sata_fsl.c
drivers/atm/firestream.c
drivers/auxdisplay/lcd2s.c
drivers/base/dd.c
drivers/base/power/wakeup.c
drivers/base/regmap/regmap-irq.c
drivers/block/loop.c
drivers/block/loop.h
drivers/block/mtip32xx/mtip32xx.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/bus/mhi/pci_generic.c
drivers/char/virtio_console.c
drivers/clk/ingenic/jz4725b-cgu.c
drivers/clk/qcom/gcc-msm8994.c
drivers/clocksource/timer-ti-dm-systimer.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/qcom-cpufreq-hw.c
drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
drivers/dma/at_xdmac.c
drivers/dma/ptdma/ptdma-dev.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/sh/shdma-base.c
drivers/dma/stm32-dmamux.c
drivers/edac/edac_mc.c
drivers/firmware/arm_scmi/driver.c
drivers/firmware/efi/libstub/riscv-stub.c
drivers/firmware/efi/vars.c
drivers/gpio/gpio-aggregator.c
drivers/gpio/gpio-rockchip.c
drivers/gpio/gpio-sifive.c
drivers/gpio/gpio-sim.c
drivers/gpio/gpio-tegra186.c
drivers/gpio/gpiolib-cdev.c
drivers/gpio/gpiolib-sysfs.c
drivers/gpio/gpiolib.c
drivers/gpio/gpiolib.h
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/arm/Kconfig
drivers/gpu/drm/bridge/nwl-dsi.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/drm_atomic_uapi.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_privacy_screen.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/display/intel_bw.c
drivers/gpu/drm/i915/display/intel_bw.h
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_drrs.c
drivers/gpu/drm/i915/display/intel_fbc.c
drivers/gpu/drm/i915/display/intel_opregion.c
drivers/gpu/drm/i915/display/intel_snps_phy.c
drivers/gpu/drm/i915/display/intel_tc.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/i915_mm.h
drivers/gpu/drm/i915/intel_pch.c
drivers/gpu/drm/i915/intel_pch.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_pm.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/imx/dcss/Kconfig
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/tegra/Kconfig
drivers/gpu/drm/tegra/dpaux.c
drivers/gpu/drm/tegra/falcon.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_hdmi.h
drivers/gpu/host1x/syncpt.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
drivers/hid/hid-apple.c
drivers/hid/hid-debug.c
drivers/hid/hid-elo.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-quirks.c
drivers/hid/i2c-hid/i2c-hid-of-goodix.c
drivers/hv/hv_utils_transport.c
drivers/hv/vmbus_drv.c
drivers/hwmon/hwmon.c
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-bcm2835.c
drivers/i2c/busses/i2c-brcmstb.c
drivers/i2c/busses/i2c-qcom-cci.c
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/accel/fxls8962af-core.c
drivers/iio/accel/fxls8962af-i2c.c
drivers/iio/accel/fxls8962af-spi.c
drivers/iio/accel/fxls8962af.h
drivers/iio/accel/kxcjk-1013.c
drivers/iio/accel/mma9551.c
drivers/iio/accel/mma9553.c
drivers/iio/adc/ad7124.c
drivers/iio/adc/men_z188_adc.c
drivers/iio/adc/ti-tsc2046.c
drivers/iio/addac/ad74413r.c
drivers/iio/frequency/admv1013.c
drivers/iio/gyro/bmg160_core.c
drivers/iio/imu/adis16480.c
drivers/iio/imu/kmx61.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
drivers/iio/industrialio-buffer.c
drivers/iio/magnetometer/bmc150_magn.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/qib/qib_sysfs.c
drivers/infiniband/ulp/rtrs/rtrs-clt.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/input.c
drivers/input/keyboard/Kconfig
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/psmouse-smbus.c
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/zinitix.c
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/amd_iommu_types.h
drivers/iommu/amd/init.c
drivers/iommu/amd/io_pgtable.c
drivers/iommu/amd/iommu.c
drivers/iommu/intel/iommu.c
drivers/iommu/tegra-smmu.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-sifive-plic.c
drivers/md/dm.c
drivers/misc/eeprom/ee1004.c
drivers/misc/fastrpc.c
drivers/mmc/core/block.c
drivers/mmc/core/sd.c
drivers/mmc/host/moxart-mmc.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sh_mmcif.c
drivers/mtd/devices/phram.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/raw/Kconfig
drivers/mtd/nand/raw/brcmnand/brcmnand.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
drivers/mtd/nand/raw/qcom_nandc.c
drivers/mtd/parsers/qcomsmempart.c
drivers/net/arcnet/com20020-pci.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/can/rcar/rcar_canfd.c
drivers/net/can/usb/etas_es58x/es58x_core.c
drivers/net/can/usb/etas_es58x/es58x_core.h
drivers/net/can/usb/gs_usb.c
drivers/net/dsa/Kconfig
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/lan9303-core.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/ocelot/seville_vsc9953.c
drivers/net/dsa/qca/ar9331.c
drivers/net/ethernet/amd/xgbe/xgbe-pci.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/bgmac-platform.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_eswitch.c
drivers/net/ethernet/intel/ice/ice_lag.c
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_protocol_type.h
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/igc/igc_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/litex/Kconfig
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
drivers/net/ethernet/microchip/sparx5/sparx5_main.h
drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/hamradio/6pack.c
drivers/net/ieee802154/ca8210.c
drivers/net/ipa/Kconfig
drivers/net/mctp/mctp-serial.c
drivers/net/mdio/mdio-aspeed.c
drivers/net/mdio/mdio-ipq4019.c
drivers/net/netdevsim/fib.c
drivers/net/phy/marvell.c
drivers/net/phy/mediatek-ge.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/sr9700.c
drivers/net/usb/zaurus.c
drivers/net/veth.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
drivers/net/wireless/intel/Makefile
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/fw/rs.c
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/mei/main.c
drivers/net/wireless/intel/iwlwifi/mei/net.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/ntb/hw/intel/ntb_hw_gen4.c
drivers/ntb/hw/intel/ntb_hw_gen4.h
drivers/ntb/msi.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/tcp.c
drivers/nvmem/core.c
drivers/of/fdt.c
drivers/of/unittest.c
drivers/parisc/ccio-dma.c
drivers/parisc/sba_iommu.c
drivers/pci/controller/pci-hyperv.c
drivers/pci/controller/pci-mvebu.c
drivers/pci/controller/vmd.c
drivers/pci/pcie/portdrv_core.c
drivers/pci/quirks.c
drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
drivers/phy/broadcom/Kconfig
drivers/phy/broadcom/phy-brcm-usb.c
drivers/phy/cadence/phy-cadence-sierra.c
drivers/phy/mediatek/phy-mtk-tphy.c
drivers/phy/phy-core-mipi-dphy.c
drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
drivers/phy/st/phy-stm32-usbphyc.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/phy/xilinx/phy-zynqmp.c
drivers/pinctrl/intel/pinctrl-tigerlake.c
drivers/pinctrl/pinctrl-k210.c
drivers/pinctrl/pinctrl-starfive.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/platform/surface/surface3_power.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/intel/int3472/tps68470_board_data.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/supply/bq256xx_charger.c
drivers/power/supply/cw2015_battery.c
drivers/ptp/ptp_ocp.c
drivers/regulator/core.c
drivers/regulator/da9121-regulator.c
drivers/s390/cio/device.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/ufs/ufshcd.c
drivers/soc/aspeed/aspeed-lpc-ctrl.c
drivers/soc/fsl/guts.c
drivers/soc/fsl/qe/qe.c
drivers/soc/fsl/qe/qe_io.c
drivers/soc/imx/gpcv2.c
drivers/soc/mediatek/mtk-scpsys.c
drivers/soc/samsung/Kconfig
drivers/spi/spi-rockchip.c
drivers/spi/spi-zynq-qspi.c
drivers/staging/fbtft/fb_st7789v.c
drivers/staging/fbtft/fbtft.h
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/tee/optee/core.c
drivers/tee/optee/ffa_abi.c
drivers/tee/optee/notif.c
drivers/tee/optee/optee_private.h
drivers/tee/optee/smc_abi.c
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
drivers/thermal/thermal_netlink.c
drivers/tty/n_gsm.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_gsc.c
drivers/tty/serial/8250/8250_pericom.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/common/ulpi.c
drivers/usb/core/port.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/drd.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/dwc3-xilinx.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/rndis.c
drivers/usb/gadget/function/rndis.h
drivers/usb/gadget/legacy/raw_gadget.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/gadget/udc/udc-xilinx.c
drivers/usb/host/xhci.c
drivers/usb/misc/usb251xb.c
drivers/usb/serial/ch341.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/typec/tipd/core.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa.c
drivers/vdpa/vdpa_user/iova_domain.c
drivers/vdpa/virtio_pci/vp_vdpa.c
drivers/vhost/iotlb.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/vhost/vsock.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/core/fbmem.c
drivers/virtio/Kconfig
drivers/virtio/virtio.c
drivers/virtio/virtio_vdpa.c
drivers/xen/pci.c
fs/Kconfig
fs/binfmt_elf.c
fs/binfmt_misc.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_map.c
fs/btrfs/extent_map.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/lzo.c
fs/btrfs/qgroup.c
fs/btrfs/relocation.c
fs/btrfs/root-tree.c
fs/btrfs/send.c
fs/btrfs/subpage.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-checker.c
fs/btrfs/tree-log.c
fs/cachefiles/interface.c
fs/cifs/cifs_swn.c
fs/cifs/cifsacl.c
fs/cifs/cifsfs.c
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/fs_context.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/transport.c
fs/cifs/xattr.c
fs/configfs/dir.c
fs/erofs/internal.h
fs/file_table.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/io_uring.c
fs/ksmbd/auth.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb_common.c
fs/ksmbd/transport_rdma.c
fs/ksmbd/vfs.h
fs/namespace.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/inode.c
fs/nfs/nfs4proc.c
fs/nfsd/nfs3proc.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4xdr.c
fs/nfsd/trace.h
fs/nfsd/vfs.c
fs/nfsd/vfs.h
fs/proc/task_mmu.c
fs/tracefs/inode.c
fs/userfaultfd.c
fs/xfs/xfs_super.c
include/dt-bindings/clock/dra7.h
include/linux/ata.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/compiler.h
include/linux/cpufreq.h
include/linux/hyperv.h
include/linux/kfence.h
include/linux/memcontrol.h
include/linux/mm.h
include/linux/mm_inline.h
include/linux/mm_types.h
include/linux/netdevice.h
include/linux/netfilter_netdev.h
include/linux/nfs.h
include/linux/nfs_fs.h
include/linux/nvme-tcp.h
include/linux/nvmem-provider.h
include/linux/rfkill.h
include/linux/sched/task.h
include/linux/slab.h
include/linux/suspend.h
include/linux/trace_events.h
include/linux/vdpa.h
include/linux/virtio.h
include/linux/virtio_config.h
include/net/addrconf.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci_core.h
include/net/bond_3ad.h
include/net/checksum.h
include/net/dsa.h
include/net/dst_metadata.h
include/net/ip6_fib.h
include/net/ipv6.h
include/net/ndisc.h
include/net/netfilter/nf_flow_table.h
include/net/netfilter/nf_queue.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nf_tables_offload.h
include/net/netns/ipv6.h
include/net/sock.h
include/net/xfrm.h
include/soc/fsl/dpaa2-fd.h
include/soc/fsl/qe/immap_qe.h
include/soc/fsl/qe/qe_tdm.h
include/soc/fsl/qe/ucc_fast.h
include/soc/fsl/qe/ucc_slow.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/kvm.h
include/uapi/linux/netfilter/nf_conntrack_common.h
include/uapi/linux/xfrm.h
kernel/auditsc.c
kernel/bpf/btf.c
kernel/bpf/helpers.c
kernel/bpf/syscall.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/configs/debug.config
kernel/cred.c
kernel/dma/swiotlb.c
kernel/events/core.c
kernel/fork.c
kernel/locking/lockdep.c
kernel/module_decompress.c
kernel/power/main.c
kernel/power/process.c
kernel/power/suspend.c
kernel/sched/core.c
kernel/seccomp.c
kernel/signal.c
kernel/sys.c
kernel/sysctl.c
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_eprobe.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_osnoise.c
kernel/trace/trace_probe.c
kernel/trace/trace_probe.h
kernel/trace/trace_selftest.c
kernel/ucount.c
kernel/user_namespace.c
lib/Kconfig
lib/iov_iter.c
lib/test_kasan.c
mm/hugetlb.c
mm/kfence/core.c
mm/kfence/kfence_test.c
mm/madvise.c
mm/memblock.c
mm/memcontrol.c
mm/memfd.c
mm/mempolicy.c
mm/mlock.c
mm/mmap.c
mm/mprotect.c
mm/util.c
mm/vmscan.c
net/8021q/vlan.h
net/8021q/vlan_dev.c
net/8021q/vlan_netlink.c
net/ax25/af_ax25.c
net/batman-adv/hard-interface.c
net/bluetooth/hci_core.c
net/bluetooth/hci_sync.c
net/bluetooth/mgmt.c
net/bluetooth/mgmt_util.c
net/bridge/br_multicast.c
net/can/isotp.c
net/can/j1939/transport.c
net/core/drop_monitor.c
net/core/filter.c
net/core/net-sysfs.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/skmsg.c
net/core/sock.c
net/dcb/dcbnl.c
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/dsa_priv.h
net/dsa/master.c
net/dsa/port.c
net/dsa/tag_lan9303.c
net/ipv4/af_inet.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/fib_lookup.h
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/ip_output.c
net/ipv4/ipmr.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/udp_tunnel_nic.c
net/ipv6/addrconf.c
net/ipv6/esp6.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/route.c
net/key/af_key.c
net/mac80211/agg-tx.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/rx.c
net/mctp/route.c
net/mpls/af_mpls.c
net/mptcp/mib.c
net/mptcp/mib.h
net/mptcp/pm.c
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/netfilter/core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_offload.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_dup_netdev.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_fwd_netdev.c
net/netfilter/nft_immediate.c
net/netfilter/nft_limit.c
net/netfilter/nft_payload.c
net/netfilter/nft_synproxy.c
net/netfilter/xt_socket.c
net/openvswitch/actions.c
net/sched/act_api.c
net/sched/act_ct.c
net/sched/cls_api.c
net/sched/sch_api.c
net/sched/sch_generic.c
net/smc/af_smc.c
net/smc/smc_core.c
net/smc/smc_pnet.c
net/smc/smc_pnet.h
net/socket.c
net/sunrpc/sysfs.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtsock.c
net/tipc/crypto.c
net/tipc/link.c
net/tipc/monitor.c
net/tipc/name_distr.c
net/tipc/name_table.c
net/tipc/node.c
net/tipc/socket.c
net/vmw_vsock/af_vsock.c
net/wireless/Makefile
net/wireless/core.c
net/wireless/nl80211.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
samples/seccomp/dropper.c
scripts/Makefile.extrawarn
scripts/kconfig/confdata.c
scripts/kconfig/preprocess.c
security/integrity/digsig_asymmetric.c
security/integrity/ima/ima_fs.c
security/integrity/ima/ima_policy.c
security/integrity/ima/ima_template.c
security/integrity/integrity_audit.c
security/selinux/ima.c
sound/core/memalloc.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/acp/acp-mach.h
sound/soc/amd/acp/acp-sof-mach.c
sound/soc/codecs/cs4265.c
sound/soc/codecs/rt5668.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/rt5682s.c
sound/soc/codecs/tas2770.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/skylake/skl.c
sound/soc/qcom/lpass-platform.c
sound/soc/soc-ops.c
sound/soc/sof/intel/hda.c
sound/usb/implicit.c
sound/usb/mixer.c
sound/x86/intel_hdmi_audio.c
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/msr-index.h
tools/cgroup/memcg_slabinfo.py
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/lib/perf/include/internal/cpumap.h
tools/lib/perf/include/perf/cpumap.h
tools/lib/perf/libperf.map
tools/lib/perf/tests/test-cpumap.c
tools/lib/perf/tests/test-evlist.c
tools/lib/subcmd/subcmd-util.h
tools/perf/builtin-script.c
tools/perf/builtin-trace.c
tools/perf/tests/attr/README
tools/perf/tests/attr/test-record-graph-default
tools/perf/tests/attr/test-record-graph-default-aarch64 [new file with mode: 0644]
tools/perf/tests/attr/test-record-graph-fp
tools/perf/tests/attr/test-record-graph-fp-aarch64 [new file with mode: 0644]
tools/perf/tests/sigtrap.c
tools/perf/util/bpf-loader.c
tools/perf/util/cs-etm.c
tools/perf/util/data.c
tools/perf/util/evlist-hybrid.c
tools/testing/kunit/run_checks.py
tools/testing/selftests/bpf/prog_tests/timer_crash.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_sockmap_kern.h
tools/testing/selftests/bpf/progs/timer_crash.c [new file with mode: 0644]
tools/testing/selftests/clone3/clone3.c
tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh
tools/testing/selftests/exec/Makefile
tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
tools/testing/selftests/ir/ir_loopback.c
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/arch_timer.c
tools/testing/selftests/kvm/aarch64/vgic_irq.c
tools/testing/selftests/kvm/lib/aarch64/vgic.c
tools/testing/selftests/memfd/memfd_test.c
tools/testing/selftests/mount_setattr/mount_setattr_test.c
tools/testing/selftests/net/mptcp/diag.sh
tools/testing/selftests/net/mptcp/mptcp_connect.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/netfilter/.gitignore
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/connect_close.c [new file with mode: 0644]
tools/testing/selftests/netfilter/nft_concat_range.sh
tools/testing/selftests/netfilter/nft_fib.sh
tools/testing/selftests/netfilter/nft_queue.sh
tools/testing/selftests/netfilter/nft_synproxy.sh [new file with mode: 0755]
tools/testing/selftests/pidfd/pidfd.h
tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
tools/testing/selftests/pidfd/pidfd_test.c
tools/testing/selftests/pidfd/pidfd_wait.c
tools/testing/selftests/seccomp/Makefile
tools/testing/selftests/vm/hugepage-mremap.c
tools/testing/selftests/vm/map_fixed_noreplace.c
tools/testing/selftests/vm/run_vmtests.sh
tools/testing/selftests/vm/userfaultfd.c
tools/tracing/rtla/Makefile
tools/tracing/rtla/src/osnoise.c
tools/tracing/rtla/src/osnoise_hist.c
tools/tracing/rtla/src/osnoise_top.c
tools/tracing/rtla/src/timerlat_hist.c
tools/tracing/rtla/src/timerlat_top.c
tools/tracing/rtla/src/trace.c
tools/tracing/rtla/src/utils.c
tools/virtio/linux/mm_types.h [new file with mode: 0644]
tools/virtio/virtio_test.c
virt/kvm/kvm_main.c

index 8cd44b0..10ee110 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -333,6 +333,9 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
+Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
+Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
 Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
diff --git a/CREDITS b/CREDITS
index b97256d..7e85a53 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -895,6 +895,12 @@ S: 3000 FORE Drive
 S: Warrendale, Pennsylvania 15086
 S: USA
 
+N: Ludovic Desroches
+E: ludovic.desroches@microchip.com
+D: Maintainer for ARM/Microchip (AT91) SoC support
+D: Author of ADC, pinctrl, XDMA and SDHCI drivers for this platform
+S: France
+
 N: Martin Devera
 E: devik@cdi.cz
 W: http://luxik.cdi.cz/~devik/qos/
index fde21d9..8595013 100644 (file)
@@ -468,6 +468,7 @@ Description:
                        auto:            Charge normally, respect thresholds
                        inhibit-charge:  Do not charge while AC is attached
                        force-discharge: Force discharge while AC is attached
+                       ================ ====================================
 
 What:          /sys/class/power_supply/<supply_name>/technology
 Date:          May 2007
index b363827..910df0e 100644 (file)
@@ -1,4 +1,4 @@
-What:          /sys/bus/platform/drivers/aspeed-uart-routing/*/uart*
+What:          /sys/bus/platform/drivers/aspeed-uart-routing/\*/uart\*
 Date:          September 2021
 Contact:       Oskar Senft <osk@google.com>
                Chia-Wei Wang <chiawei_wang@aspeedtech.com>
@@ -9,7 +9,7 @@ Description:    Selects the RX source of the UARTx device.
                depends on the selected file.
 
                e.g.
-               cat /sys/bus/platform/drivers/aspeed-uart-routing/*.uart_routing/uart1
+               cat /sys/bus/platform/drivers/aspeed-uart-routing/\*.uart_routing/uart1
                [io1] io2 io3 io4 uart2 uart3 uart4 io6
 
                In this case, UART1 gets its input from IO1 (physical serial port 1).
@@ -17,7 +17,7 @@ Description:  Selects the RX source of the UARTx device.
 Users:         OpenBMC.  Proposed changes should be mailed to
                openbmc@lists.ozlabs.org
 
-What:          /sys/bus/platform/drivers/aspeed-uart-routing/*/io*
+What:          /sys/bus/platform/drivers/aspeed-uart-routing/\*/io\*
 Date:          September 2021
 Contact:       Oskar Senft <osk@google.com>
                Chia-Wei Wang <chiawei_wang@aspeedtech.com>
index a2b22d5..9e95568 100644 (file)
@@ -60,8 +60,8 @@ privileged data touched during the speculative execution.
 Spectre variant 1 attacks take advantage of speculative execution of
 conditional branches, while Spectre variant 2 attacks use speculative
 execution of indirect branches to leak privileged memory.
-See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[7] <spec_ref7>`
-:ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.
+See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[6] <spec_ref6>`
+:ref:`[7] <spec_ref7>` :ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.
 
 Spectre variant 1 (Bounds Check Bypass)
 ---------------------------------------
@@ -131,6 +131,19 @@ steer its indirect branch speculations to gadget code, and measure the
 speculative execution's side effects left in level 1 cache to infer the
 victim's data.
 
+Yet another variant 2 attack vector is for the attacker to poison the
+Branch History Buffer (BHB) to speculatively steer an indirect branch
+to a specific Branch Target Buffer (BTB) entry, even if the entry isn't
+associated with the source address of the indirect branch. Specifically,
+the BHB might be shared across privilege levels even in the presence of
+Enhanced IBRS.
+
+Currently the only known real-world BHB attack vector is via
+unprivileged eBPF. Therefore, it's highly recommended to not enable
+unprivileged eBPF, especially when eIBRS is used (without retpolines).
+For a full mitigation against BHB attacks, it's recommended to use
+retpolines (or eIBRS combined with retpolines).
+
 Attack scenarios
 ----------------
 
@@ -364,13 +377,15 @@ The possible values in this file are:
 
   - Kernel status:
 
-  ====================================  =================================
-  'Not affected'                        The processor is not vulnerable
-  'Vulnerable'                          Vulnerable, no mitigation
-  'Mitigation: Full generic retpoline'  Software-focused mitigation
-  'Mitigation: Full AMD retpoline'      AMD-specific software mitigation
-  'Mitigation: Enhanced IBRS'           Hardware-focused mitigation
-  ====================================  =================================
+  ========================================  =================================
+  'Not affected'                            The processor is not vulnerable
+  'Mitigation: None'                        Vulnerable, no mitigation
+  'Mitigation: Retpolines'                  Use Retpoline thunks
+  'Mitigation: LFENCE'                      Use LFENCE instructions
+  'Mitigation: Enhanced IBRS'               Hardware-focused mitigation
+  'Mitigation: Enhanced IBRS + Retpolines'  Hardware-focused + Retpolines
+  'Mitigation: Enhanced IBRS + LFENCE'      Hardware-focused + LFENCE
+  ========================================  =================================
 
   - Firmware status: Show if Indirect Branch Restricted Speculation (IBRS) is
     used to protect against Spectre variant 2 attacks when calling firmware (x86 only).
@@ -583,12 +598,13 @@ kernel command line.
 
                Specific mitigations can also be selected manually:
 
-               retpoline
-                                       replace indirect branches
-               retpoline,generic
-                                       google's original retpoline
-               retpoline,amd
-                                       AMD-specific minimal thunk
+                retpoline               auto pick between generic,lfence
+                retpoline,generic       Retpolines
+                retpoline,lfence        LFENCE; indirect branch
+                retpoline,amd           alias for retpoline,lfence
+                eibrs                   enhanced IBRS
+                eibrs,retpoline         enhanced IBRS + Retpolines
+                eibrs,lfence            enhanced IBRS + LFENCE
 
                Not specifying this option is equivalent to
                spectre_v2=auto.
@@ -599,7 +615,7 @@ kernel command line.
                spectre_v2=off. Spectre variant 1 mitigations
                cannot be disabled.
 
-For spectre_v2_user see :doc:`/admin-guide/kernel-parameters`.
+For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
 
 Mitigation selection guide
 --------------------------
@@ -681,7 +697,7 @@ AMD white papers:
 
 .. _spec_ref6:
 
-[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/90343-B_SoftwareTechniquesforManagingSpeculation_WP_7-18Update_FNL.pdf>`_.
+[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/Managing-Speculation-on-AMD-Processors.pdf>`_.
 
 ARM white papers:
 
index f5a27f0..7123524 100644 (file)
                        Specific mitigations can also be selected manually:
 
                        retpoline         - replace indirect branches
-                       retpoline,generic - google's original retpoline
-                       retpoline,amd     - AMD-specific minimal thunk
+                       retpoline,generic - Retpolines
+                       retpoline,lfence  - LFENCE; indirect branch
+                       retpoline,amd     - alias for retpoline,lfence
+                       eibrs             - enhanced IBRS
+                       eibrs,retpoline   - enhanced IBRS + Retpolines
+                       eibrs,lfence      - enhanced IBRS + LFENCE
 
                        Not specifying this option is equivalent to
                        spectre_v2=auto.
index bfc2870..6e2e416 100644 (file)
@@ -23,7 +23,7 @@ There are four components to pagemap:
     * Bit  56    page exclusively mapped (since 4.2)
     * Bit  57    pte is uffd-wp write-protected (since 5.13) (see
       :ref:`Documentation/admin-guide/mm/userfaultfd.rst <userfaultfd>`)
-    * Bits 57-60 zero
+    * Bits 58-60 zero
     * Bit  61    page is file-page or shared-anon (since 3.5)
     * Bit  62    page swapped
     * Bit  63    page present
index 3b32336..d84eded 100644 (file)
@@ -75,6 +75,9 @@ And optionally
  .resume - A pointer to a per-policy resume function which is called
  with interrupts disabled and _before_ the governor is started again.
 
+ .ready - A pointer to a per-policy ready function which is called after
+ the policy is fully initialized.
+
  .attr - A pointer to a NULL-terminated list of "struct freq_attr" which
  allow to export values to sysfs.
 
index 76af931..1c83e7d 100644 (file)
@@ -242,7 +242,7 @@ example:
 
        int rectangle_area(struct shape *this)
        {
-               struct rectangle *self = container_of(this, struct shape, parent);
+               struct rectangle *self = container_of(this, struct rectangle, parent);
 
                return self->length * self->width;
        };
index c612e1f..ff91df0 100644 (file)
@@ -8,7 +8,8 @@ title: Atmel AT91 device tree bindings.
 
 maintainers:
   - Alexandre Belloni <alexandre.belloni@bootlin.com>
-  - Ludovic Desroches <ludovic.desroches@microchip.com>
+  - Claudiu Beznea <claudiu.beznea@microchip.com>
+  - Nicolas Ferre <nicolas.ferre@microchip.com>
 
 description: |
   Boards with a SoC of the Atmel AT91 or SMART family shall have the following
index b5cb374..10a91cc 100644 (file)
@@ -8,7 +8,7 @@ Required properties:
   - compatible: Should contain a chip-specific compatible string,
        Chip-specific strings are of the form "fsl,<chip>-dcfg",
        The following <chip>s are known to be supported:
-       ls1012a, ls1021a, ls1043a, ls1046a, ls2080a.
+       ls1012a, ls1021a, ls1043a, ls1046a, ls2080a, lx2160a
 
   - reg : should contain base address and length of DCFG memory-mapped registers
 
index e77635c..fa8b316 100644 (file)
@@ -119,6 +119,9 @@ Boards (incomplete list of examples):
 - OMAP3 BeagleBoard : Low cost community board
   compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
 
+- OMAP3 BeagleBoard A to B4 : Early BeagleBoard revisions A to B4 with a timer quirk
+  compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
+
 - OMAP3 Tobi with Overo : Commercial expansion board with daughter board
   compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"
 
index f7d48f2..10119d9 100644 (file)
@@ -44,6 +44,7 @@ Required properties:
        * "fsl,ls1046a-clockgen"
        * "fsl,ls1088a-clockgen"
        * "fsl,ls2080a-clockgen"
+       * "fsl,lx2160a-clockgen"
        Chassis-version clock strings include:
        * "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
        * "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks
index e043495..427c587 100644 (file)
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: SiFive GPIO controller
 
 maintainers:
-  - Yash Shah <yash.shah@sifive.com>
   - Paul Walmsley <paul.walmsley@sifive.com>
 
 properties:
index 0dfa6b2..27092c6 100644 (file)
@@ -35,6 +35,10 @@ description:
   contains a specific memory layout, which is documented in chapter 8 of the
   SiFive U5 Coreplex Series Manual <https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf>.
 
+  The thead,c900-plic is different from sifive,plic-1.0.0 in opensbi, the
+  T-HEAD PLIC implementation requires setting a delegation bit to allow access
+  from S-mode. So add thead,c900-plic to distinguish them.
+
 maintainers:
   - Sagar Kadam <sagar.kadam@sifive.com>
   - Paul Walmsley  <paul.walmsley@sifive.com>
@@ -42,12 +46,17 @@ maintainers:
 
 properties:
   compatible:
-    items:
-      - enum:
-          - sifive,fu540-c000-plic
-          - starfive,jh7100-plic
-          - canaan,k210-plic
-      - const: sifive,plic-1.0.0
+    oneOf:
+      - items:
+          - enum:
+              - sifive,fu540-c000-plic
+              - starfive,jh7100-plic
+              - canaan,k210-plic
+          - const: sifive,plic-1.0.0
+      - items:
+          - enum:
+              - allwinner,sun20i-d1-plic
+          - const: thead,c900-plic
 
   reg:
     maxItems: 1
index 272832e..fa86691 100644 (file)
@@ -20,7 +20,7 @@ description: |
 
 maintainers:
   - Kishon Vijay Abraham I <kishon@ti.com>
-  - Roger Quadros <rogerq@ti.com
+  - Roger Quadros <rogerq@kernel.org>
 
 properties:
   compatible:
index cbbf5e8..f78d324 100644 (file)
@@ -8,7 +8,7 @@ title: OMAP USB2 PHY
 
 maintainers:
   - Kishon Vijay Abraham I <kishon@ti.com>
-  - Roger Quadros <rogerq@ti.com>
+  - Roger Quadros <rogerq@kernel.org>
 
 properties:
   compatible:
index 84e6691..db41cd7 100644 (file)
@@ -8,7 +8,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: SiFive PWM controller
 
 maintainers:
-  - Yash Shah <yash.shah@sifive.com>
   - Sagar Kadam <sagar.kadam@sifive.com>
   - Paul Walmsley <paul.walmsley@sifive.com>
 
index 2b1f916..e2d330b 100644 (file)
@@ -9,7 +9,6 @@ title: SiFive L2 Cache Controller
 
 maintainers:
   - Sagar Kadam <sagar.kadam@sifive.com>
-  - Yash Shah <yash.shah@sifive.com>
   - Paul Walmsley  <paul.walmsley@sifive.com>
 
 description:
index 77adbeb..c3e9f34 100644 (file)
@@ -8,6 +8,7 @@ title: Audio codec controlled by ChromeOS EC
 
 maintainers:
   - Cheng-Yi Chiang <cychiang@chromium.org>
+  - Tzung-Bi Shih <tzungbi@google.com>
 
 description: |
   Google's ChromeOS EC codec is a digital mic codec provided by the
index f00867e..481aaa0 100644 (file)
@@ -53,6 +53,7 @@ properties:
           - const: st,stm32mp15-hsotg
           - const: snps,dwc2
       - const: samsung,s3c6400-hsotg
+      - const: intel,socfpga-agilex-hsotg
 
   reg:
     maxItems: 1
index a634774..eedde38 100644 (file)
@@ -7,7 +7,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Bindings for the TI wrapper module for the Cadence USBSS-DRD controller
 
 maintainers:
-  - Roger Quadros <rogerq@ti.com>
+  - Roger Quadros <rogerq@kernel.org>
 
 properties:
   compatible:
index f6e91a5..4f7a212 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: TI Keystone Soc USB Controller
 
 maintainers:
-  - Roger Quadros <rogerq@ti.com>
+  - Roger Quadros <rogerq@kernel.org>
 
 properties:
   compatible:
index 0266cd0..df53ff8 100644 (file)
@@ -2,7 +2,7 @@
 
         Set the histogram bucket size (default *1*).
 
-**-e**, **--entries** *N*
+**-E**, **--entries** *N*
 
         Set the number of entries of the histogram (default 256).
 
index 8973c5d..d5d6161 100644 (file)
@@ -1,7 +1,7 @@
 The **rtla osnoise** tool is an interface for the *osnoise* tracer. The
 *osnoise* tracer dispatches a kernel thread per-cpu. These threads read the
 time in a loop while with preemption, softirq and IRQs enabled, thus
-allowing all the sources of operating systme noise during its execution.
+allowing all the sources of operating system noise during its execution.
 The *osnoise*'s tracer threads take note of the delta between each time
 read, along with an interference counter of all sources of interference.
 At the end of each period, the *osnoise* tracer displays a summary of
index 52298dd..f2e79d2 100644 (file)
@@ -36,7 +36,7 @@ default). The reason for reducing the runtime is to avoid starving the
 **rtla** tool. The tool is also set to run for *one minute*. The output
 histogram is set to group outputs in buckets of *10us* and *25* entries::
 
-  [root@f34 ~/]# rtla osnoise hist -P F:1 -c 0-11 -r 900000 -d 1M -b 10 -e 25
+  [root@f34 ~/]# rtla osnoise hist -P F:1 -c 0-11 -r 900000 -d 1M -b 10 -E 25
   # RTLA osnoise histogram
   # Time unit is microseconds (us)
   # Duration:   0 00:01:00
index 87a3604..2ca9204 100644 (file)
@@ -84,6 +84,8 @@ CPUfreq核心层注册一个cpufreq_driver结构体。
  .resume - 一个指向per-policy恢复函数的指针,该函数在关中断且在调节器再一次启动前被
  调用。
 
+ .ready - 一个指向per-policy准备函数的指针,该函数在策略完全初始化之后被调用。
+
  .attr - 一个指向NULL结尾的"struct freq_attr"列表的指针,该列表允许导出值到
  sysfs。
 
index a426710..9f31723 100644 (file)
@@ -1394,7 +1394,7 @@ documentation when it pops into existence).
 -------------------
 
 :Capability: KVM_CAP_ENABLE_CAP
-:Architectures: mips, ppc, s390
+:Architectures: mips, ppc, s390, x86
 :Type: vcpu ioctl
 :Parameters: struct kvm_enable_cap (in)
 :Returns: 0 on success; -1 on error
@@ -6997,6 +6997,20 @@ indicated by the fd to the VM this is called on.
 This is intended to support intra-host migration of VMs between userspace VMMs,
 upgrading the VMM process without interrupting the guest.
 
+7.30 KVM_CAP_PPC_AIL_MODE_3
+-------------------------------
+
+:Capability: KVM_CAP_PPC_AIL_MODE_3
+:Architectures: ppc
+:Type: vm
+
+This capability indicates that the kernel supports the mode 3 setting for the
+"Address Translation Mode on Interrupt" aka "Alternate Interrupt Location"
+resource that is controlled with the H_SET_MODE hypercall.
+
+This capability allows a guest kernel to use a better-performance mode for
+handling interrupts and system calls.
+
 8. Other capabilities.
 ======================
 
index 69a2935..05fd080 100644 (file)
@@ -1620,6 +1620,7 @@ M:        Olof Johansson <olof@lixom.net>
 M:     soc@kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
+C:     irc://irc.libera.chat/armlinux
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
 F:     arch/arm/boot/dts/Makefile
 F:     arch/arm64/boot/dts/Makefile
@@ -1627,6 +1628,7 @@ F:        arch/arm64/boot/dts/Makefile
 ARM SUB-ARCHITECTURES
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
+C:     irc://irc.libera.chat/armlinux
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
 F:     arch/arm/mach-*/
 F:     arch/arm/plat-*/
@@ -1780,6 +1782,7 @@ F:        drivers/irqchip/irq-apple-aic.c
 F:     drivers/mailbox/apple-mailbox.c
 F:     drivers/pinctrl/pinctrl-apple-gpio.c
 F:     drivers/soc/apple/*
+F:     drivers/watchdog/apple_wdt.c
 F:     include/dt-bindings/interrupt-controller/apple-aic.h
 F:     include/dt-bindings/pinctrl/apple.h
 F:     include/linux/apple-mailbox.h
@@ -2251,7 +2254,7 @@ F:        drivers/phy/mediatek/
 ARM/Microchip (AT91) SoC support
 M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
-M:     Ludovic Desroches <ludovic.desroches@microchip.com>
+M:     Claudiu Beznea <claudiu.beznea@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 W:     http://www.linux4sam.org
@@ -2570,10 +2573,13 @@ N:      rockchip
 
 ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
 M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
+C:     irc://irc.libera.chat/linux-exynos
 Q:     https://patchwork.kernel.org/project/linux-samsung-soc/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git
 F:     Documentation/arm/samsung/
 F:     Documentation/devicetree/bindings/arm/samsung/
 F:     Documentation/devicetree/bindings/power/pd-samsung.yaml
@@ -3133,11 +3139,9 @@ W:       https://wireless.wiki.kernel.org/en/users/Drivers/ath5k
 F:     drivers/net/wireless/ath/ath5k/
 
 ATHEROS ATH6KL WIRELESS DRIVER
-M:     Kalle Valo <kvalo@kernel.org>
 L:     linux-wireless@vger.kernel.org
-S:     Supported
+S:     Orphan
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 F:     drivers/net/wireless/ath/ath6kl/
 
 ATI_REMOTE2 DRIVER
@@ -4543,6 +4547,7 @@ F:        drivers/platform/chrome/
 
 CHROMEOS EC CODEC DRIVER
 M:     Cheng-Yi Chiang <cychiang@chromium.org>
+M:     Tzung-Bi Shih <tzungbi@google.com>
 R:     Guenter Roeck <groeck@chromium.org>
 S:     Maintained
 F:     Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
@@ -4908,7 +4913,8 @@ F:        kernel/cgroup/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:     Johannes Weiner <hannes@cmpxchg.org>
 M:     Michal Hocko <mhocko@kernel.org>
-M:     Vladimir Davydov <vdavydov.dev@gmail.com>
+M:     Roman Gushchin <roman.gushchin@linux.dev>
+M:     Shakeel Butt <shakeelb@google.com>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
 S:     Maintained
@@ -5772,7 +5778,7 @@ F:        tools/testing/selftests/dma/
 
 DMA-BUF HEAPS FRAMEWORK
 M:     Sumit Semwal <sumit.semwal@linaro.org>
-R:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
+R:     Benjamin Gaignard <benjamin.gaignard@collabora.com>
 R:     Liam Mark <lmark@codeaurora.org>
 R:     Laura Abbott <labbott@redhat.com>
 R:     Brian Starkey <Brian.Starkey@arm.com>
@@ -6502,7 +6508,7 @@ F:        Documentation/devicetree/bindings/display/rockchip/
 F:     drivers/gpu/drm/rockchip/
 
 DRM DRIVERS FOR STI
-M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M:     Alain Volmat <alain.volmat@foss.st.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -6511,8 +6517,8 @@ F:        drivers/gpu/drm/sti
 
 DRM DRIVERS FOR STM
 M:     Yannick Fertre <yannick.fertre@foss.st.com>
+M:     Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
 M:     Philippe Cornu <philippe.cornu@foss.st.com>
-M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -7006,12 +7012,6 @@ L:       linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/sb_edac.c
 
-EDAC-SIFIVE
-M:     Yash Shah <yash.shah@sifive.com>
-L:     linux-edac@vger.kernel.org
-S:     Supported
-F:     drivers/edac/sifive_edac.c
-
 EDAC-SKYLAKE
 M:     Tony Luck <tony.luck@intel.com>
 L:     linux-edac@vger.kernel.org
@@ -7182,7 +7182,7 @@ F:        drivers/net/can/usb/etas_es58x/
 
 ETHERNET BRIDGE
 M:     Roopa Prabhu <roopa@nvidia.com>
-M:     Nikolay Aleksandrov <nikolay@nvidia.com>
+M:     Nikolay Aleksandrov <razor@blackwall.org>
 L:     bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -7575,6 +7575,12 @@ S:       Maintained
 W:     http://floatingpoint.sourceforge.net/emulator/index.html
 F:     arch/x86/math-emu/
 
+FRAMEBUFFER CORE
+M:     Daniel Vetter <daniel@ffwll.ch>
+F:     drivers/video/fbdev/core/
+S:     Odd Fixes
+T:     git git://anongit.freedesktop.org/drm/drm-misc
+
 FRAMEBUFFER LAYER
 M:     Helge Deller <deller@gmx.de>
 L:     linux-fbdev@vger.kernel.org
@@ -7738,8 +7744,7 @@ M:        Qiang Zhao <qiang.zhao@nxp.com>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/soc/fsl/qe/
-F:     include/soc/fsl/*qe*.h
-F:     include/soc/fsl/*ucc*.h
+F:     include/soc/fsl/qe/
 
 FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
 M:     Li Yang <leoyang.li@nxp.com>
@@ -7770,6 +7775,7 @@ F:        Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
 F:     Documentation/devicetree/bindings/soc/fsl/
 F:     drivers/soc/fsl/
 F:     include/linux/fsl/
+F:     include/soc/fsl/
 
 FREESCALE SOC FS_ENET DRIVER
 M:     Pantelis Antoniou <pantelis.antoniou@gmail.com>
@@ -9252,6 +9258,15 @@ S:       Maintained
 W:     https://github.com/o2genum/ideapad-slidebar
 F:     drivers/input/misc/ideapad_slidebar.c
 
+IDMAPPED MOUNTS
+M:     Christian Brauner <brauner@kernel.org>
+L:     linux-fsdevel@vger.kernel.org
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
+F:     Documentation/filesystems/idmappings.rst
+F:     tools/testing/selftests/mount_setattr/
+F:     include/linux/mnt_idmapping.h
+
 IDT VersaClock 5 CLOCK DRIVER
 M:     Luca Ceresoli <luca@lucaceresoli.net>
 S:     Maintained
@@ -13303,8 +13318,8 @@ W:      http://www.iptables.org/
 W:     http://www.nftables.org/
 Q:     http://patchwork.ozlabs.org/project/netfilter-devel/list/
 C:     irc://irc.libera.chat/netfilter
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next.git
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
 F:     include/net/netfilter/
@@ -13571,7 +13586,7 @@ F:      tools/testing/selftests/nci/
 
 NFS, SUNRPC, AND LOCKD CLIENTS
 M:     Trond Myklebust <trond.myklebust@hammerspace.com>
-M:     Anna Schumaker <anna.schumaker@netapp.com>
+M:     Anna Schumaker <anna@kernel.org>
 L:     linux-nfs@vger.kernel.org
 S:     Maintained
 W:     http://client.linux-nfs.org
@@ -13684,7 +13699,7 @@ F:      scripts/nsdeps
 NTB AMD DRIVER
 M:     Sanjay R Mehta <sanju.mehta@amd.com>
 M:     Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
-L:     linux-ntb@googlegroups.com
+L:     ntb@lists.linux.dev
 S:     Supported
 F:     drivers/ntb/hw/amd/
 
@@ -13692,7 +13707,7 @@ NTB DRIVER CORE
 M:     Jon Mason <jdmason@kudzu.us>
 M:     Dave Jiang <dave.jiang@intel.com>
 M:     Allen Hubbe <allenbh@gmail.com>
-L:     linux-ntb@googlegroups.com
+L:     ntb@lists.linux.dev
 S:     Supported
 W:     https://github.com/jonmason/ntb/wiki
 T:     git git://github.com/jonmason/ntb.git
@@ -13704,13 +13719,13 @@ F:    tools/testing/selftests/ntb/
 
 NTB IDT DRIVER
 M:     Serge Semin <fancer.lancer@gmail.com>
-L:     linux-ntb@googlegroups.com
+L:     ntb@lists.linux.dev
 S:     Supported
 F:     drivers/ntb/hw/idt/
 
 NTB INTEL DRIVER
 M:     Dave Jiang <dave.jiang@intel.com>
-L:     linux-ntb@googlegroups.com
+L:     ntb@lists.linux.dev
 S:     Supported
 W:     https://github.com/davejiang/linux/wiki
 T:     git https://github.com/davejiang/linux.git
@@ -14389,6 +14404,7 @@ M:      Rob Herring <robh+dt@kernel.org>
 M:     Frank Rowand <frowand.list@gmail.com>
 L:     devicetree@vger.kernel.org
 S:     Maintained
+C:     irc://irc.libera.chat/devicetree
 W:     http://www.devicetree.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
 F:     Documentation/ABI/testing/sysfs-firmware-ofw
@@ -14400,6 +14416,7 @@ OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 M:     Rob Herring <robh+dt@kernel.org>
 L:     devicetree@vger.kernel.org
 S:     Maintained
+C:     irc://irc.libera.chat/devicetree
 Q:     http://patchwork.ozlabs.org/project/devicetree-bindings/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
 F:     Documentation/devicetree/
@@ -15133,7 +15150,7 @@ M:      Ingo Molnar <mingo@redhat.com>
 M:     Arnaldo Carvalho de Melo <acme@kernel.org>
 R:     Mark Rutland <mark.rutland@arm.com>
 R:     Alexander Shishkin <alexander.shishkin@linux.intel.com>
-R:     Jiri Olsa <jolsa@redhat.com>
+R:     Jiri Olsa <jolsa@kernel.org>
 R:     Namhyung Kim <namhyung@kernel.org>
 L:     linux-perf-users@vger.kernel.org
 L:     linux-kernel@vger.kernel.org
@@ -15290,9 +15307,11 @@ PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <tomasz.figa@gmail.com>
 M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
+R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
+C:     irc://irc.libera.chat/linux-exynos
 Q:     https://patchwork.kernel.org/project/linux-samsung-soc/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
 F:     Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -15549,6 +15568,7 @@ M:      Iurii Zaikin <yzaikin@google.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/linux.git sysctl-next
 F:     fs/proc/proc_sysctl.c
 F:     include/linux/sysctl.h
 F:     kernel/sysctl-test.c
@@ -15896,6 +15916,7 @@ S:      Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 F:     drivers/net/wireless/ath/ath10k/
+F:     Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
 
 QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
 M:     Kalle Valo <kvalo@kernel.org>
@@ -15903,11 +15924,12 @@ L:    ath11k@lists.infradead.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 F:     drivers/net/wireless/ath/ath11k/
+F:     Documentation/devicetree/bindings/net/wireless/qcom,ath11k.txt
 
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
-M:     ath9k-devel@qca.qualcomm.com
+M:     Toke Høiland-Jørgensen <toke@toke.dk>
 L:     linux-wireless@vger.kernel.org
-S:     Supported
+S:     Maintained
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
 F:     Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
 F:     drivers/net/wireless/ath/ath9k/
@@ -15982,14 +16004,6 @@ F:     Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
 F:     drivers/misc/fastrpc.c
 F:     include/uapi/misc/fastrpc.h
 
-QUALCOMM GENERIC INTERFACE I2C DRIVER
-M:     Akash Asthana <akashast@codeaurora.org>
-M:     Mukesh Savaliya <msavaliy@codeaurora.org>
-L:     linux-i2c@vger.kernel.org
-L:     linux-arm-msm@vger.kernel.org
-S:     Supported
-F:     drivers/i2c/busses/i2c-qcom-geni.c
-
 QUALCOMM HEXAGON ARCHITECTURE
 M:     Brian Cain <bcain@codeaurora.org>
 L:     linux-hexagon@vger.kernel.org
@@ -16061,8 +16075,8 @@ F:      Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
 F:     drivers/mtd/nand/raw/qcom_nandc.c
 
 QUALCOMM RMNET DRIVER
-M:     Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
-M:     Sean Tranchetti <stranche@codeaurora.org>
+M:     Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
+M:     Sean Tranchetti <quic_stranche@quicinc.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
@@ -16088,11 +16102,10 @@ F:    Documentation/devicetree/bindings/media/*venus*
 F:     drivers/media/platform/qcom/venus/
 
 QUALCOMM WCN36XX WIRELESS DRIVER
-M:     Kalle Valo <kvalo@kernel.org>
+M:     Loic Poulain <loic.poulain@linaro.org>
 L:     wcn36xx@lists.infradead.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
-T:     git git://github.com/KrasnikovEugene/wcn36xx.git
 F:     drivers/net/wireless/ath/wcn36xx/
 
 QUANTENNA QTNFMAC WIRELESS DRIVER
@@ -16355,6 +16368,7 @@ F:      drivers/watchdog/realtek_otto_wdt.c
 
 REALTEK RTL83xx SMI DSA ROUTER CHIPS
 M:     Linus Walleij <linus.walleij@linaro.org>
+M:     Alvin Šipraga <alsi@bang-olufsen.dk>
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
 F:     drivers/net/dsa/realtek-smi*
@@ -16817,8 +16831,8 @@ F:      drivers/video/fbdev/savage/
 S390
 M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
-M:     Christian Borntraeger <borntraeger@linux.ibm.com>
-R:     Alexander Gordeev <agordeev@linux.ibm.com>
+M:     Alexander Gordeev <agordeev@linux.ibm.com>
+R:     Christian Borntraeger <borntraeger@linux.ibm.com>
 R:     Sven Schnelle <svens@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
@@ -17089,6 +17103,7 @@ SAMSUNG SOC CLOCK DRIVERS
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 M:     Tomasz Figa <tomasz.figa@gmail.com>
 M:     Chanwoo Choi <cw00.choi@samsung.com>
+R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-samsung-soc@vger.kernel.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
@@ -17725,6 +17740,21 @@ S:     Maintained
 W:     http://www.winischhofer.at/linuxsisusbvga.shtml
 F:     drivers/usb/misc/sisusbvga/
 
+SL28 CPLD MFD DRIVER
+M:     Michael Walle <michael@walle.cc>
+S:     Maintained
+F:     Documentation/devicetree/bindings/gpio/kontron,sl28cpld-gpio.yaml
+F:     Documentation/devicetree/bindings/hwmon/kontron,sl28cpld-hwmon.yaml
+F:     Documentation/devicetree/bindings/interrupt-controller/kontron,sl28cpld-intc.yaml
+F:     Documentation/devicetree/bindings/mfd/kontron,sl28cpld.yaml
+F:     Documentation/devicetree/bindings/pwm/kontron,sl28cpld-pwm.yaml
+F:     Documentation/devicetree/bindings/watchdog/kontron,sl28cpld-wdt.yaml
+F:     drivers/gpio/gpio-sl28cpld.c
+F:     drivers/hwmon/sl28cpld-hwmon.c
+F:     drivers/irqchip/irq-sl28cpld.c
+F:     drivers/pwm/pwm-sl28cpld.c
+F:     drivers/watchdog/sl28cpld_wdt.c
+
 SLAB ALLOCATOR
 M:     Christoph Lameter <cl@linux.com>
 M:     Pekka Enberg <penberg@kernel.org>
@@ -17732,8 +17762,10 @@ M:     David Rientjes <rientjes@google.com>
 M:     Joonsoo Kim <iamjoonsoo.kim@lge.com>
 M:     Andrew Morton <akpm@linux-foundation.org>
 M:     Vlastimil Babka <vbabka@suse.cz>
+R:     Roman Gushchin <roman.gushchin@linux.dev>
 L:     linux-mm@kvack.org
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git
 F:     include/linux/sl?b*.h
 F:     mm/sl?b*
 
@@ -18441,7 +18473,7 @@ F:      Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
 F:     sound/soc/sti/
 
 STI CEC DRIVER
-M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M:     Alain Volmat <alain.volmat@foss.st.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/media/stih-cec.txt
 F:     drivers/media/cec/platform/sti/
@@ -19595,6 +19627,14 @@ F:     Documentation/trace/timerlat-tracer.rst
 F:     Documentation/trace/hwlat_detector.rst
 F:     arch/*/kernel/trace.c
 
+Real-time Linux Analysis (RTLA) tools
+M:     Daniel Bristot de Oliveira <bristot@kernel.org>
+M:     Steven Rostedt <rostedt@goodmis.org>
+L:     linux-trace-devel@vger.kernel.org
+S:     Maintained
+F:     Documentation/tools/rtla/
+F:     tools/tracing/rtla/
+
 TRADITIONAL CHINESE DOCUMENTATION
 M:     Hu Haowen <src.res@email.cn>
 L:     linux-doc-tw-discuss@lists.sourceforge.net
@@ -21428,7 +21468,6 @@ THE REST
 M:     Linus Torvalds <torvalds@linux-foundation.org>
 L:     linux-kernel@vger.kernel.org
 S:     Buried alive in reporters
-Q:     http://patchwork.kernel.org/project/LKML/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 F:     *
 F:     */
index ceb987e..87f6724 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Gobble Gobble
+EXTRAVERSION = -rc7
+NAME = Superb Owl
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index 235ad55..e41eca7 100644 (file)
@@ -806,6 +806,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
        logicpd-som-lv-37xx-devkit.dtb \
        omap3430-sdp.dtb \
        omap3-beagle.dtb \
+       omap3-beagle-ab4.dtb \
        omap3-beagle-xm.dtb \
        omap3-beagle-xm-ab.dtb \
        omap3-cm-t3517.dtb \
index 673159d..f957fea 100644 (file)
@@ -55,7 +55,7 @@
                2 1 0 0 /* # 0: INACTIVE, 1: TX, 2: RX */
        >;
        tx-num-evt = <16>;
-       rt-num-evt = <16>;
+       rx-num-evt = <16>;
        status = "okay";
 };
 
index 6b485cb..42bff11 100644 (file)
                target-module@48210000 {
                        compatible = "ti,sysc-omap4-simple", "ti,sysc";
                        power-domains = <&prm_mpu>;
-                       clocks = <&mpu_clkctrl DRA7_MPU_CLKCTRL 0>;
+                       clocks = <&mpu_clkctrl DRA7_MPU_MPU_CLKCTRL 0>;
                        clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
                              <0x58000014 4>;
                        reg-names = "rev", "syss";
                        ti,syss-mask = <1>;
-                       clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 0>,
-                                <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
-                                <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 10>,
-                                <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 11>;
+                       clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 0>,
+                                <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
+                                <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 10>,
+                                <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 11>;
                        clock-names = "fck", "hdmi_clk", "sys_clk", "tv_clk";
                        #address-cells = <1>;
                        #size-cells = <1>;
                                                         SYSC_OMAP2_SOFTRESET |
                                                         SYSC_OMAP2_AUTOIDLE)>;
                                        ti,syss-mask = <1>;
-                                       clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
+                                       clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
                                        clock-names = "fck";
                                        #address-cells = <1>;
                                        #size-cells = <1>;
                                                        <SYSC_IDLE_SMART>,
                                                        <SYSC_IDLE_SMART_WKUP>;
                                        ti,sysc-mask = <(SYSC_OMAP4_SOFTRESET)>;
-                                       clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
-                                                <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
+                                       clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
+                                                <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
                                        clock-names = "fck", "dss_clk";
                                        #address-cells = <1>;
                                        #size-cells = <1>;
                                compatible = "vivante,gc";
                                reg = <0x0 0x700>;
                                interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&dss_clkctrl DRA7_BB2D_CLKCTRL 0>;
+                               clocks = <&dss_clkctrl DRA7_DSS_BB2D_CLKCTRL 0>;
                                clock-names = "core";
                        };
                };
        ti,no-reset-on-init;
        ti,no-idle;
        timer@0 {
-               assigned-clocks = <&wkupaon_clkctrl DRA7_TIMER1_CLKCTRL 24>;
+               assigned-clocks = <&wkupaon_clkctrl DRA7_WKUPAON_TIMER1_CLKCTRL 24>;
                assigned-clock-parents = <&sys_32k_ck>;
        };
 };
index 8cbaf1c..3b609d9 100644 (file)
@@ -79,7 +79,6 @@
                                                MX23_PAD_LCD_RESET__GPIO_1_18
                                                MX23_PAD_PWM3__GPIO_1_29
                                                MX23_PAD_PWM4__GPIO_1_30
-                                               MX23_PAD_SSP1_DETECT__SSP1_DETECT
                                        >;
                                        fsl,drive-strength = <MXS_DRIVE_4mA>;
                                        fsl,voltage = <MXS_VOLTAGE_HIGH>;
index d07d8f8..ccfa8e3 100644 (file)
@@ -5,6 +5,8 @@
  * Author: Fabio Estevam <fabio.estevam@freescale.com>
  */
 
+#include <dt-bindings/gpio/gpio.h>
+
 / {
        aliases {
                backlight = &backlight;
                                MX6QDL_PAD_SD3_DAT1__SD3_DATA1          0x17059
                                MX6QDL_PAD_SD3_DAT2__SD3_DATA2          0x17059
                                MX6QDL_PAD_SD3_DAT3__SD3_DATA3          0x17059
+                               MX6QDL_PAD_SD3_DAT5__GPIO7_IO00         0x1b0b0
                        >;
                };
 
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       non-removable;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
index b7ea37a..bcec98b 100644 (file)
                        interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
                        assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
-                       assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
+                       assigned-clock-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
                        timeout-sec = <40>;
                };
 
index 3be7cba..26eaba3 100644 (file)
@@ -59,7 +59,7 @@
                        };
 
                        uart_A: serial@84c0 {
-                               compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+                               compatible = "amlogic,meson6-uart";
                                reg = <0x84c0 0x18>;
                                interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
                                fifo-size = <128>;
@@ -67,7 +67,7 @@
                        };
 
                        uart_B: serial@84dc {
-                               compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+                               compatible = "amlogic,meson6-uart";
                                reg = <0x84dc 0x18>;
                                interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_C: serial@8700 {
-                               compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+                               compatible = "amlogic,meson6-uart";
                                reg = <0x8700 0x18>;
                                interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_AO: serial@4c0 {
-                               compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart";
+                               compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart";
                                reg = <0x4c0 0x18>;
                                interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
index f80ddc9..9997a5d 100644 (file)
 };
 
 &uart_AO {
-       compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
-       clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
-       clock-names = "baud", "xtal", "pclk";
+       compatible = "amlogic,meson8-uart", "amlogic,meson-ao-uart";
+       clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>;
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_A {
-       compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
-       clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
-       clock-names = "baud", "xtal", "pclk";
+       compatible = "amlogic,meson8-uart";
+       clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_B {
-       compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
-       clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
-       clock-names = "baud", "xtal", "pclk";
+       compatible = "amlogic,meson8-uart";
+       clocks = <&xtal>, <&clkc CLKID_UART1>, <&clkc CLKID_CLK81>;
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_C {
-       compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
-       clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
-       clock-names = "baud", "xtal", "pclk";
+       compatible = "amlogic,meson8-uart";
+       clocks = <&xtal>, <&clkc CLKID_UART2>, <&clkc CLKID_CLK81>;
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &usb0 {
index b49b7cb..94f1c03 100644 (file)
 };
 
 &uart_AO {
-       compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
-       clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
-       clock-names = "baud", "xtal", "pclk";
+       compatible = "amlogic,meson8b-uart", "amlogic,meson-ao-uart";
+       clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>;
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_A {
-       compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
-       clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
-       clock-names = "baud", "xtal", "pclk";
+       compatible = "amlogic,meson8b-uart";
+       clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_B {
-       compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
-       clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
-       clock-names = "baud", "xtal", "pclk";
+       compatible = "amlogic,meson8b-uart";
+       clocks = <&xtal>, <&clkc CLKID_UART1>, <&clkc CLKID_CLK81>;
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_C {
-       compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
-       clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
-       clock-names = "baud", "xtal", "pclk";
+       compatible = "amlogic,meson8b-uart";
+       clocks = <&xtal>, <&clkc CLKID_UART2>, <&clkc CLKID_CLK81>;
+       clock-names = "xtal", "pclk", "baud";
 };
 
 &usb0 {
diff --git a/arch/arm/boot/dts/omap3-beagle-ab4.dts b/arch/arm/boot/dts/omap3-beagle-ab4.dts
new file mode 100644 (file)
index 0000000..990ff2d
--- /dev/null
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/dts-v1/;
+
+#include "omap3-beagle.dts"
+
+/ {
+       model = "TI OMAP3 BeagleBoard A to B4";
+       compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3";
+};
+
+/*
+ * Workaround for capacitor C70 issue, see "Boards revision A and < B5"
+ * section at https://elinux.org/BeagleBoard_Community
+ */
+
+/* Unusable as clocksource because of unreliable oscillator */
+&counter32k {
+       status = "disabled";
+};
+
+/* Unusable as clockevent because of unreliable oscillator, allow to idle */
+&timer1_target {
+       /delete-property/ti,no-reset-on-init;
+       /delete-property/ti,no-idle;
+       timer@0 {
+               /delete-property/ti,timer-alwon;
+       };
+};
+
+/* Preferred always-on timer for clocksource */
+&timer12_target {
+       ti,no-reset-on-init;
+       ti,no-idle;
+       timer@0 {
+               /* Always clocked by secure_32k_fck */
+       };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+       ti,no-reset-on-init;
+       ti,no-idle;
+       timer@0 {
+               assigned-clocks = <&gpt2_fck>;
+               assigned-clock-parents = <&sys_ck>;
+       };
+};
index f9f34b8..0548b39 100644 (file)
        phys = <0 &hsusb2_phy>;
 };
 
-/* Unusable as clocksource because of unreliable oscillator */
-&counter32k {
-       status = "disabled";
-};
-
-/* Unusable as clockevent because if unreliable oscillator, allow to idle */
-&timer1_target {
-       /delete-property/ti,no-reset-on-init;
-       /delete-property/ti,no-idle;
-       timer@0 {
-               /delete-property/ti,timer-alwon;
-       };
-};
-
-/* Preferred always-on timer for clocksource */
-&timer12_target {
-       ti,no-reset-on-init;
-       ti,no-idle;
-       timer@0 {
-               /* Always clocked by secure_32k_fck */
-       };
-};
-
-/* Preferred timer for clockevent */
-&timer2_target {
-       ti,no-reset-on-init;
-       ti,no-idle;
-       timer@0 {
-               assigned-clocks = <&gpt2_fck>;
-               assigned-clock-parents = <&sys_ck>;
-       };
-};
-
 &twl_gpio {
        ti,use-leds;
        /* pullups: BIT(1) */
index 5e55198..54cd373 100644 (file)
        status = "disabled";
 };
 
+/* Unusable as clockevent because of unreliable oscillator, allow to idle */
+&timer1_target {
+       /delete-property/ti,no-reset-on-init;
+       /delete-property/ti,no-idle;
+       timer@0 {
+               /delete-property/ti,timer-alwon;
+       };
+};
+
+/* Preferred timer for clockevent */
+&timer12_target {
+       ti,no-reset-on-init;
+       ti,no-idle;
+       timer@0 {
+               /* Always clocked by secure_32k_fck */
+       };
+};
+
 &twl_gpio {
        ti,use-leds;
        /*
index c2995a2..162d072 100644 (file)
                display2 = &tv0;
        };
 };
-
-/* Unusable as clocksource because of unreliable oscillator */
-&counter32k {
-       status = "disabled";
-};
-
-/* Unusable as clockevent because if unreliable oscillator, allow to idle */
-&timer1_target {
-       /delete-property/ti,no-reset-on-init;
-       /delete-property/ti,no-idle;
-       timer@0 {
-               /delete-property/ti,timer-alwon;
-       };
-};
-
-/* Preferred always-on timer for clocksource */
-&timer12_target {
-       ti,no-reset-on-init;
-       ti,no-idle;
-       timer@0 {
-               /* Always clocked by secure_32k_fck */
-       };
-};
-
-/* Preferred timer for clockevent */
-&timer2_target {
-       ti,no-reset-on-init;
-       ti,no-idle;
-       timer@0 {
-               assigned-clocks = <&gpt2_fck>;
-               assigned-clock-parents = <&sys_ck>;
-       };
-};
index 8eed9e3..5868eb5 100644 (file)
                interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
                assigned-clocks = <&cru SCLK_HDMI_PHY>;
                assigned-clock-parents = <&hdmi_phy>;
-               clocks = <&cru SCLK_HDMI_HDCP>, <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_CEC>;
-               clock-names = "isfr", "iahb", "cec";
+               clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>, <&cru SCLK_HDMI_CEC>;
+               clock-names = "iahb", "isfr", "cec";
                pinctrl-names = "default";
                pinctrl-0 = <&hdmii2c_xfer &hdmi_hpd &hdmi_cec>;
                resets = <&cru SRST_HDMI_P>;
index aaaa618..45a9d9b 100644 (file)
                status = "disabled";
        };
 
-       crypto: cypto-controller@ff8a0000 {
+       crypto: crypto@ff8a0000 {
                compatible = "rockchip,rk3288-crypto";
                reg = <0x0 0xff8a0000 0x0 0x4000>;
                interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
index 367ba48..b587e4e 100644 (file)
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                        reg = <0x41>;
-                                       irq-over-gpio;
                                        irq-gpios = <&gpiopinctrl 29 0x4>;
                                        id = <0>;
                                        blocks = <0x5>;
index 580ca49..f8c5899 100644 (file)
                        cap-sd-highspeed;
                        cap-mmc-highspeed;
                        /* All direction control is used */
-                       st,sig-dir-cmd;
-                       st,sig-dir-dat0;
-                       st,sig-dir-dat2;
-                       st,sig-dir-dat31;
                        st,sig-pin-fbclk;
                        full-pwr-cycle;
                        vmmc-supply = <&ab8500_ldo_aux3_reg>;
index 1d2aac2..fdc1d64 100644 (file)
                     "google,nyan-big-rev1", "google,nyan-big-rev0",
                     "google,nyan-big", "google,nyan", "nvidia,tegra124";
 
-       panel: panel {
-               compatible = "auo,b133xtn01";
-
-               power-supply = <&vdd_3v3_panel>;
-               backlight = <&backlight>;
-               ddc-i2c-bus = <&dpaux>;
+       host1x@50000000 {
+               dpaux@545c0000 {
+                       aux-bus {
+                               panel: panel {
+                                       compatible = "auo,b133xtn01";
+                                       backlight = <&backlight>;
+                               };
+                       };
+               };
        };
 
        mmc@700b0400 { /* SD Card on this bus */
index 677babd..abdf445 100644 (file)
                     "google,nyan-blaze-rev0", "google,nyan-blaze",
                     "google,nyan", "nvidia,tegra124";
 
-       panel: panel {
-               compatible = "samsung,ltn140at29-301";
-
-               power-supply = <&vdd_3v3_panel>;
-               backlight = <&backlight>;
-               ddc-i2c-bus = <&dpaux>;
+       host1x@50000000 {
+               dpaux@545c0000 {
+                       aux-bus {
+                               panel: panel {
+                                       compatible = "samsung,ltn140at29-301";
+                                       backlight = <&backlight>;
+                               };
+                       };
+               };
        };
 
        sound {
index 232c906..6a9592c 100644 (file)
                dpaux@545c0000 {
                        vdd-supply = <&vdd_3v3_panel>;
                        status = "okay";
+
+                       aux-bus {
+                               panel: panel {
+                                       compatible = "lg,lp129qe";
+                                       backlight = <&backlight>;
+                               };
+                       };
                };
        };
 
                };
        };
 
-       panel: panel {
-               compatible = "lg,lp129qe";
-               power-supply = <&vdd_3v3_panel>;
-               backlight = <&backlight>;
-               ddc-i2c-bus = <&dpaux>;
-       };
-
        vdd_mux: regulator-mux {
                compatible = "regulator-fixed";
                regulator-name = "+VDD_MUX";
index 6fe6796..b50cc01 100644 (file)
        .endm
 #endif
 
+#if __LINUX_ARM_ARCH__ < 7
+       .macro  dsb, args
+       mcr     p15, 0, r0, c7, c10, 4
+       .endm
+
+       .macro  isb, args
+       mcr     p15, 0, r0, c7, c5, 4
+       .endm
+#endif
+
        .macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
        .if \save
diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
new file mode 100644 (file)
index 0000000..d1fa560
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+enum {
+       SPECTRE_UNAFFECTED,
+       SPECTRE_MITIGATED,
+       SPECTRE_VULNERABLE,
+};
+
+enum {
+       __SPECTRE_V2_METHOD_BPIALL,
+       __SPECTRE_V2_METHOD_ICIALLU,
+       __SPECTRE_V2_METHOD_SMC,
+       __SPECTRE_V2_METHOD_HVC,
+       __SPECTRE_V2_METHOD_LOOP8,
+};
+
+enum {
+       SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
+       SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+       SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+       SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+       SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
+};
+
+void spectre_v2_update_state(unsigned int state, unsigned int methods);
+
+int spectre_bhb_update_vectors(unsigned int method);
+
+#endif
index 4a91428..0ef21bf 100644 (file)
 #define ARM_MMU_DISCARD(x)     x
 #endif
 
+/* Set start/end symbol names to the LMA for the section */
+#define ARM_LMA(sym, section)                                          \
+       sym##_start = LOADADDR(section);                                \
+       sym##_end = LOADADDR(section) + SIZEOF(section)
+
 #define PROC_INFO                                                      \
                . = ALIGN(4);                                           \
                __proc_info_begin = .;                                  \
  * only thing that matters is their relative offsets
  */
 #define ARM_VECTORS                                                    \
-       __vectors_start = .;                                            \
-       .vectors 0xffff0000 : AT(__vectors_start) {                     \
-               *(.vectors)                                             \
+       __vectors_lma = .;                                              \
+       OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {            \
+               .vectors {                                              \
+                       *(.vectors)                                     \
+               }                                                       \
+               .vectors.bhb.loop8 {                                    \
+                       *(.vectors.bhb.loop8)                           \
+               }                                                       \
+               .vectors.bhb.bpiall {                                   \
+                       *(.vectors.bhb.bpiall)                          \
+               }                                                       \
        }                                                               \
-       . = __vectors_start + SIZEOF(.vectors);                         \
-       __vectors_end = .;                                              \
+       ARM_LMA(__vectors, .vectors);                                   \
+       ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);               \
+       ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);             \
+       . = __vectors_lma + SIZEOF(.vectors) +                          \
+               SIZEOF(.vectors.bhb.loop8) +                            \
+               SIZEOF(.vectors.bhb.bpiall);                            \
                                                                        \
-       __stubs_start = .;                                              \
-       .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {            \
+       __stubs_lma = .;                                                \
+       .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {              \
                *(.stubs)                                               \
        }                                                               \
-       . = __stubs_start + SIZEOF(.stubs);                             \
-       __stubs_end = .;                                                \
+       ARM_LMA(__stubs, .stubs);                                       \
+       . = __stubs_lma + SIZEOF(.stubs);                               \
                                                                        \
        PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
 
index ae295a3..6ef3b53 100644 (file)
@@ -106,4 +106,6 @@ endif
 
 obj-$(CONFIG_HAVE_ARM_SMCCC)   += smccc-call.o
 
+obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
+
 extra-y := $(head-y) vmlinux.lds
index 5cd0578..676703c 100644 (file)
@@ -1002,12 +1002,11 @@ vector_\name:
        sub     lr, lr, #\correction
        .endif
 
-       @
-       @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
-       @ (parent CPSR)
-       @
+       @ Save r0, lr_<exception> (parent PC)
        stmia   sp, {r0, lr}            @ save r0, lr
-       mrs     lr, spsr
+
+       @ Save spsr_<exception> (parent CPSR)
+2:     mrs     lr, spsr
        str     lr, [sp, #8]            @ save spsr
 
        @
@@ -1028,6 +1027,44 @@ vector_\name:
        movs    pc, lr                  @ branch to handler in SVC mode
 ENDPROC(vector_\name)
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+       .subsection 1
+       .align 5
+vector_bhb_loop8_\name:
+       .if \correction
+       sub     lr, lr, #\correction
+       .endif
+
+       @ Save r0, lr_<exception> (parent PC)
+       stmia   sp, {r0, lr}
+
+       @ bhb workaround
+       mov     r0, #8
+1:     b       . + 4
+       subs    r0, r0, #1
+       bne     1b
+       dsb
+       isb
+       b       2b
+ENDPROC(vector_bhb_loop8_\name)
+
+vector_bhb_bpiall_\name:
+       .if \correction
+       sub     lr, lr, #\correction
+       .endif
+
+       @ Save r0, lr_<exception> (parent PC)
+       stmia   sp, {r0, lr}
+
+       @ bhb workaround
+       mcr     p15, 0, r0, c7, c5, 6   @ BPIALL
+       @ isb not needed due to "movs pc, lr" in the vector stub
+       @ which gives a "context synchronisation".
+       b       2b
+ENDPROC(vector_bhb_bpiall_\name)
+       .previous
+#endif
+
        .align  2
        @ handler addresses follow this label
 1:
@@ -1036,6 +1073,10 @@ ENDPROC(vector_\name)
        .section .stubs, "ax", %progbits
        @ This must be the first word
        .word   vector_swi
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+       .word   vector_bhb_loop8_swi
+       .word   vector_bhb_bpiall_swi
+#endif
 
 vector_rst:
  ARM(  swi     SYS_ERROR0      )
@@ -1150,8 +1191,10 @@ vector_addrexcptn:
  * FIQ "NMI" handler
  *-----------------------------------------------------------------------------
  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
- * systems.
+ * systems. This must be the last vector stub, so lets place it in its own
+ * subsection.
  */
+       .subsection 2
        vector_stub     fiq, FIQ_MODE, 4
 
        .long   __fiq_usr                       @  0  (USR_26 / USR_32)
@@ -1184,6 +1227,30 @@ vector_addrexcptn:
        W(b)    vector_irq
        W(b)    vector_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+       .section .vectors.bhb.loop8, "ax", %progbits
+.L__vectors_bhb_loop8_start:
+       W(b)    vector_rst
+       W(b)    vector_bhb_loop8_und
+       W(ldr)  pc, .L__vectors_bhb_loop8_start + 0x1004
+       W(b)    vector_bhb_loop8_pabt
+       W(b)    vector_bhb_loop8_dabt
+       W(b)    vector_addrexcptn
+       W(b)    vector_bhb_loop8_irq
+       W(b)    vector_bhb_loop8_fiq
+
+       .section .vectors.bhb.bpiall, "ax", %progbits
+.L__vectors_bhb_bpiall_start:
+       W(b)    vector_rst
+       W(b)    vector_bhb_bpiall_und
+       W(ldr)  pc, .L__vectors_bhb_bpiall_start + 0x1008
+       W(b)    vector_bhb_bpiall_pabt
+       W(b)    vector_bhb_bpiall_dabt
+       W(b)    vector_addrexcptn
+       W(b)    vector_bhb_bpiall_irq
+       W(b)    vector_bhb_bpiall_fiq
+#endif
+
        .data
        .align  2
 
index ac86c34..dbc1913 100644 (file)
@@ -153,6 +153,29 @@ ENDPROC(ret_from_fork)
  *-----------------------------------------------------------------------------
  */
 
+       .align  5
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ENTRY(vector_bhb_loop8_swi)
+       sub     sp, sp, #PT_REGS_SIZE
+       stmia   sp, {r0 - r12}
+       mov     r8, #8
+1:     b       2f
+2:     subs    r8, r8, #1
+       bne     1b
+       dsb
+       isb
+       b       3f
+ENDPROC(vector_bhb_loop8_swi)
+
+       .align  5
+ENTRY(vector_bhb_bpiall_swi)
+       sub     sp, sp, #PT_REGS_SIZE
+       stmia   sp, {r0 - r12}
+       mcr     p15, 0, r8, c7, c5, 6   @ BPIALL
+       isb
+       b       3f
+ENDPROC(vector_bhb_bpiall_swi)
+#endif
        .align  5
 ENTRY(vector_swi)
 #ifdef CONFIG_CPU_V7M
@@ -160,6 +183,7 @@ ENTRY(vector_swi)
 #else
        sub     sp, sp, #PT_REGS_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
+3:
  ARM(  add     r8, sp, #S_PC           )
  ARM(  stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
  THUMB(        mov     r8, sp                  )
index 7bd30c0..22f937e 100644 (file)
@@ -154,22 +154,38 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
        return 0;
 }
 
-static struct undef_hook kgdb_brkpt_hook = {
+static struct undef_hook kgdb_brkpt_arm_hook = {
        .instr_mask             = 0xffffffff,
        .instr_val              = KGDB_BREAKINST,
-       .cpsr_mask              = MODE_MASK,
+       .cpsr_mask              = PSR_T_BIT | MODE_MASK,
        .cpsr_val               = SVC_MODE,
        .fn                     = kgdb_brk_fn
 };
 
-static struct undef_hook kgdb_compiled_brkpt_hook = {
+static struct undef_hook kgdb_brkpt_thumb_hook = {
+       .instr_mask             = 0xffff,
+       .instr_val              = KGDB_BREAKINST & 0xffff,
+       .cpsr_mask              = PSR_T_BIT | MODE_MASK,
+       .cpsr_val               = PSR_T_BIT | SVC_MODE,
+       .fn                     = kgdb_brk_fn
+};
+
+static struct undef_hook kgdb_compiled_brkpt_arm_hook = {
        .instr_mask             = 0xffffffff,
        .instr_val              = KGDB_COMPILED_BREAK,
-       .cpsr_mask              = MODE_MASK,
+       .cpsr_mask              = PSR_T_BIT | MODE_MASK,
        .cpsr_val               = SVC_MODE,
        .fn                     = kgdb_compiled_brk_fn
 };
 
+static struct undef_hook kgdb_compiled_brkpt_thumb_hook = {
+       .instr_mask             = 0xffff,
+       .instr_val              = KGDB_COMPILED_BREAK & 0xffff,
+       .cpsr_mask              = PSR_T_BIT | MODE_MASK,
+       .cpsr_val               = PSR_T_BIT | SVC_MODE,
+       .fn                     = kgdb_compiled_brk_fn
+};
+
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
        struct pt_regs *regs = args->regs;
@@ -210,8 +226,10 @@ int kgdb_arch_init(void)
        if (ret != 0)
                return ret;
 
-       register_undef_hook(&kgdb_brkpt_hook);
-       register_undef_hook(&kgdb_compiled_brkpt_hook);
+       register_undef_hook(&kgdb_brkpt_arm_hook);
+       register_undef_hook(&kgdb_brkpt_thumb_hook);
+       register_undef_hook(&kgdb_compiled_brkpt_arm_hook);
+       register_undef_hook(&kgdb_compiled_brkpt_thumb_hook);
 
        return 0;
 }
@@ -224,8 +242,10 @@ int kgdb_arch_init(void)
  */
 void kgdb_arch_exit(void)
 {
-       unregister_undef_hook(&kgdb_brkpt_hook);
-       unregister_undef_hook(&kgdb_compiled_brkpt_hook);
+       unregister_undef_hook(&kgdb_brkpt_arm_hook);
+       unregister_undef_hook(&kgdb_brkpt_thumb_hook);
+       unregister_undef_hook(&kgdb_compiled_brkpt_arm_hook);
+       unregister_undef_hook(&kgdb_compiled_brkpt_thumb_hook);
        unregister_die_notifier(&kgdb_notifier);
 }
 
diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
new file mode 100644 (file)
index 0000000..e7fea96
--- /dev/null
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bpf.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+
+#include <asm/spectre.h>
+
+static bool _unprivileged_ebpf_enabled(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+       return !sysctl_unprivileged_bpf_disabled;
+#else
+       return false;
+#endif
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+static unsigned int spectre_v2_state;
+static unsigned int spectre_v2_methods;
+
+void spectre_v2_update_state(unsigned int state, unsigned int method)
+{
+       if (state > spectre_v2_state)
+               spectre_v2_state = state;
+       spectre_v2_methods |= method;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       const char *method;
+
+       if (spectre_v2_state == SPECTRE_UNAFFECTED)
+               return sprintf(buf, "%s\n", "Not affected");
+
+       if (spectre_v2_state != SPECTRE_MITIGATED)
+               return sprintf(buf, "%s\n", "Vulnerable");
+
+       if (_unprivileged_ebpf_enabled())
+               return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
+       switch (spectre_v2_methods) {
+       case SPECTRE_V2_METHOD_BPIALL:
+               method = "Branch predictor hardening";
+               break;
+
+       case SPECTRE_V2_METHOD_ICIALLU:
+               method = "I-cache invalidation";
+               break;
+
+       case SPECTRE_V2_METHOD_SMC:
+       case SPECTRE_V2_METHOD_HVC:
+               method = "Firmware call";
+               break;
+
+       case SPECTRE_V2_METHOD_LOOP8:
+               method = "History overwrite";
+               break;
+
+       default:
+               method = "Multiple mitigations";
+               break;
+       }
+
+       return sprintf(buf, "Mitigation: %s\n", method);
+}
index da04ed8..cae4a74 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/exception.h>
+#include <asm/spectre.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>
 #include <asm/ptrace.h>
@@ -789,10 +790,59 @@ static inline void __init kuser_init(void *vectors)
 }
 #endif
 
+#ifndef CONFIG_CPU_V7M
+static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
+{
+       memcpy(vma, lma_start, lma_end - lma_start);
+}
+
+static void flush_vectors(void *vma, size_t offset, size_t size)
+{
+       unsigned long start = (unsigned long)vma + offset;
+       unsigned long end = start + size;
+
+       flush_icache_range(start, end);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+int spectre_bhb_update_vectors(unsigned int method)
+{
+       extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
+       extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
+       void *vec_start, *vec_end;
+
+       if (system_state >= SYSTEM_FREEING_INITMEM) {
+               pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
+                      smp_processor_id());
+               return SPECTRE_VULNERABLE;
+       }
+
+       switch (method) {
+       case SPECTRE_V2_METHOD_LOOP8:
+               vec_start = __vectors_bhb_loop8_start;
+               vec_end = __vectors_bhb_loop8_end;
+               break;
+
+       case SPECTRE_V2_METHOD_BPIALL:
+               vec_start = __vectors_bhb_bpiall_start;
+               vec_end = __vectors_bhb_bpiall_end;
+               break;
+
+       default:
+               pr_err("CPU%u: unknown Spectre BHB state %d\n",
+                      smp_processor_id(), method);
+               return SPECTRE_VULNERABLE;
+       }
+
+       copy_from_lma(vectors_page, vec_start, vec_end);
+       flush_vectors(vectors_page, 0, vec_end - vec_start);
+
+       return SPECTRE_MITIGATED;
+}
+#endif
+
 void __init early_trap_init(void *vectors_base)
 {
-#ifndef CONFIG_CPU_V7M
-       unsigned long vectors = (unsigned long)vectors_base;
        extern char __stubs_start[], __stubs_end[];
        extern char __vectors_start[], __vectors_end[];
        unsigned i;
@@ -813,17 +863,20 @@ void __init early_trap_init(void *vectors_base)
         * into the vector page, mapped at 0xffff0000, and ensure these
         * are visible to the instruction stream.
         */
-       memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
-       memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+       copy_from_lma(vectors_base, __vectors_start, __vectors_end);
+       copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
 
        kuser_init(vectors_base);
 
-       flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+       flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
+}
 #else /* ifndef CONFIG_CPU_V7M */
+void __init early_trap_init(void *vectors_base)
+{
        /*
         * on V7-M there is no need to copy the vector table to a dedicated
         * memory area. The address is configurable and so a table in the kernel
         * image can be used.
         */
-#endif
 }
+#endif
index 6daaa64..21413a9 100644 (file)
@@ -263,9 +263,9 @@ static int __init omapdss_init_of(void)
        }
 
        r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+       put_device(&pdev->dev);
        if (r) {
                pr_err("Unable to populate DSS submodule devices\n");
-               put_device(&pdev->dev);
                return r;
        }
 
index ccb0e37..31d1a21 100644 (file)
@@ -752,8 +752,10 @@ static int __init _init_clkctrl_providers(void)
 
        for_each_matching_node(np, ti_clkctrl_match_table) {
                ret = _setup_clkctrl_provider(np);
-               if (ret)
+               if (ret) {
+                       of_node_put(np);
                        break;
+               }
        }
 
        return ret;
index 43ddec6..594edf9 100644 (file)
@@ -2,6 +2,7 @@
 menuconfig ARCH_INTEL_SOCFPGA
        bool "Altera SOCFPGA family"
        depends on ARCH_MULTI_V7
+       select ARCH_HAS_RESET_CONTROLLER
        select ARCH_SUPPORTS_BIG_ENDIAN
        select ARM_AMBA
        select ARM_GIC
@@ -18,6 +19,7 @@ menuconfig ARCH_INTEL_SOCFPGA
        select PL310_ERRATA_727915
        select PL310_ERRATA_753970 if PL310
        select PL310_ERRATA_769419
+       select RESET_CONTROLLER
 
 if ARCH_INTEL_SOCFPGA
 config SOCFPGA_SUSPEND
index 58afba3..9724c16 100644 (file)
@@ -830,6 +830,7 @@ config CPU_BPREDICT_DISABLE
 
 config CPU_SPECTRE
        bool
+       select GENERIC_CPU_VULNERABILITIES
 
 config HARDEN_BRANCH_PREDICTOR
        bool "Harden the branch predictor against aliasing attacks" if EXPERT
@@ -850,6 +851,16 @@ config HARDEN_BRANCH_PREDICTOR
 
           If unsure, say Y.
 
+config HARDEN_BRANCH_HISTORY
+       bool "Harden Spectre style attacks against branch history" if EXPERT
+       depends on CPU_SPECTRE
+       default y
+       help
+         Speculation attacks against some high-performance processors can
+         make use of branch history to influence future speculation. When
+         taking an exception, a sequence of branches overwrites the branch
+         history, or branch history is invalidated.
+
 config TLS_REG_EMUL
        bool
        select NEED_KUSER_HELPERS
index 274e4f7..5e2be37 100644 (file)
@@ -212,12 +212,14 @@ early_param("ecc", early_ecc);
 static int __init early_cachepolicy(char *p)
 {
        pr_warn("cachepolicy kernel parameter not supported without cp15\n");
+       return 0;
 }
 early_param("cachepolicy", early_cachepolicy);
 
 static int __init noalign_setup(char *__unused)
 {
        pr_warn("noalign kernel parameter not supported without cp15\n");
+       return 1;
 }
 __setup("noalign", noalign_setup);
 
index 114c05a..c226fea 100644 (file)
@@ -6,8 +6,35 @@
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/proc-fns.h>
+#include <asm/spectre.h>
 #include <asm/system_misc.h>
 
+#ifdef CONFIG_ARM_PSCI
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+       struct arm_smccc_res res;
+
+       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                            ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+       switch ((int)res.a0) {
+       case SMCCC_RET_SUCCESS:
+               return SPECTRE_MITIGATED;
+
+       case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+               return SPECTRE_UNAFFECTED;
+
+       default:
+               return SPECTRE_VULNERABLE;
+       }
+}
+#else
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+       return SPECTRE_VULNERABLE;
+}
+#endif
+
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
 
@@ -36,13 +63,60 @@ static void __maybe_unused call_hvc_arch_workaround_1(void)
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
 
-static void cpu_v7_spectre_init(void)
+static unsigned int spectre_v2_install_workaround(unsigned int method)
 {
        const char *spectre_v2_method = NULL;
        int cpu = smp_processor_id();
 
        if (per_cpu(harden_branch_predictor_fn, cpu))
-               return;
+               return SPECTRE_MITIGATED;
+
+       switch (method) {
+       case SPECTRE_V2_METHOD_BPIALL:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_bpiall;
+               spectre_v2_method = "BPIALL";
+               break;
+
+       case SPECTRE_V2_METHOD_ICIALLU:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_iciallu;
+               spectre_v2_method = "ICIALLU";
+               break;
+
+       case SPECTRE_V2_METHOD_HVC:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       call_hvc_arch_workaround_1;
+               cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+               spectre_v2_method = "hypervisor";
+               break;
+
+       case SPECTRE_V2_METHOD_SMC:
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       call_smc_arch_workaround_1;
+               cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+               spectre_v2_method = "firmware";
+               break;
+       }
+
+       if (spectre_v2_method)
+               pr_info("CPU%u: Spectre v2: using %s workaround\n",
+                       smp_processor_id(), spectre_v2_method);
+
+       return SPECTRE_MITIGATED;
+}
+#else
+static unsigned int spectre_v2_install_workaround(unsigned int method)
+{
+       pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n");
+
+       return SPECTRE_VULNERABLE;
+}
+#endif
+
+static void cpu_v7_spectre_v2_init(void)
+{
+       unsigned int state, method = 0;
 
        switch (read_cpuid_part()) {
        case ARM_CPU_PART_CORTEX_A8:
@@ -51,69 +125,133 @@ static void cpu_v7_spectre_init(void)
        case ARM_CPU_PART_CORTEX_A17:
        case ARM_CPU_PART_CORTEX_A73:
        case ARM_CPU_PART_CORTEX_A75:
-               per_cpu(harden_branch_predictor_fn, cpu) =
-                       harden_branch_predictor_bpiall;
-               spectre_v2_method = "BPIALL";
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_BPIALL;
                break;
 
        case ARM_CPU_PART_CORTEX_A15:
        case ARM_CPU_PART_BRAHMA_B15:
-               per_cpu(harden_branch_predictor_fn, cpu) =
-                       harden_branch_predictor_iciallu;
-               spectre_v2_method = "ICIALLU";
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_ICIALLU;
                break;
 
-#ifdef CONFIG_ARM_PSCI
        case ARM_CPU_PART_BRAHMA_B53:
                /* Requires no workaround */
+               state = SPECTRE_UNAFFECTED;
                break;
+
        default:
                /* Other ARM CPUs require no workaround */
-               if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
+               if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
+                       state = SPECTRE_UNAFFECTED;
                        break;
+               }
+
                fallthrough;
-               /* Cortex A57/A72 require firmware workaround */
-       case ARM_CPU_PART_CORTEX_A57:
-       case ARM_CPU_PART_CORTEX_A72: {
-               struct arm_smccc_res res;
 
-               arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                                    ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if ((int)res.a0 != 0)
-                       return;
+       /* Cortex A57/A72 require firmware workaround */
+       case ARM_CPU_PART_CORTEX_A57:
+       case ARM_CPU_PART_CORTEX_A72:
+               state = spectre_v2_get_cpu_fw_mitigation_state();
+               if (state != SPECTRE_MITIGATED)
+                       break;
 
                switch (arm_smccc_1_1_get_conduit()) {
                case SMCCC_CONDUIT_HVC:
-                       per_cpu(harden_branch_predictor_fn, cpu) =
-                               call_hvc_arch_workaround_1;
-                       cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
-                       spectre_v2_method = "hypervisor";
+                       method = SPECTRE_V2_METHOD_HVC;
                        break;
 
                case SMCCC_CONDUIT_SMC:
-                       per_cpu(harden_branch_predictor_fn, cpu) =
-                               call_smc_arch_workaround_1;
-                       cpu_do_switch_mm = cpu_v7_smc_switch_mm;
-                       spectre_v2_method = "firmware";
+                       method = SPECTRE_V2_METHOD_SMC;
                        break;
 
                default:
+                       state = SPECTRE_VULNERABLE;
                        break;
                }
        }
-#endif
+
+       if (state == SPECTRE_MITIGATED)
+               state = spectre_v2_install_workaround(method);
+
+       spectre_v2_update_state(state, method);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+static int spectre_bhb_method;
+
+static const char *spectre_bhb_method_name(int method)
+{
+       switch (method) {
+       case SPECTRE_V2_METHOD_LOOP8:
+               return "loop";
+
+       case SPECTRE_V2_METHOD_BPIALL:
+               return "BPIALL";
+
+       default:
+               return "unknown";
        }
+}
 
-       if (spectre_v2_method)
-               pr_info("CPU%u: Spectre v2: using %s workaround\n",
-                       smp_processor_id(), spectre_v2_method);
+static int spectre_bhb_install_workaround(int method)
+{
+       if (spectre_bhb_method != method) {
+               if (spectre_bhb_method) {
+                       pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
+                              smp_processor_id());
+
+                       return SPECTRE_VULNERABLE;
+               }
+
+               if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
+                       return SPECTRE_VULNERABLE;
+
+               spectre_bhb_method = method;
+       }
+
+       pr_info("CPU%u: Spectre BHB: using %s workaround\n",
+               smp_processor_id(), spectre_bhb_method_name(method));
+
+       return SPECTRE_MITIGATED;
 }
 #else
-static void cpu_v7_spectre_init(void)
+static int spectre_bhb_install_workaround(int method)
 {
+       return SPECTRE_VULNERABLE;
 }
 #endif
 
+static void cpu_v7_spectre_bhb_init(void)
+{
+       unsigned int state, method = 0;
+
+       switch (read_cpuid_part()) {
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_BRAHMA_B15:
+       case ARM_CPU_PART_CORTEX_A57:
+       case ARM_CPU_PART_CORTEX_A72:
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_LOOP8;
+               break;
+
+       case ARM_CPU_PART_CORTEX_A73:
+       case ARM_CPU_PART_CORTEX_A75:
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_BPIALL;
+               break;
+
+       default:
+               state = SPECTRE_UNAFFECTED;
+               break;
+       }
+
+       if (state == SPECTRE_MITIGATED)
+               state = spectre_bhb_install_workaround(method);
+
+       spectre_v2_update_state(state, method);
+}
+
 static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
                                                  u32 mask, const char *msg)
 {
@@ -142,16 +280,17 @@ static bool check_spectre_auxcr(bool *warned, u32 bit)
 void cpu_v7_ca8_ibe(void)
 {
        if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
-               cpu_v7_spectre_init();
+               cpu_v7_spectre_v2_init();
 }
 
 void cpu_v7_ca15_ibe(void)
 {
        if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
-               cpu_v7_spectre_init();
+               cpu_v7_spectre_v2_init();
 }
 
 void cpu_v7_bugs_init(void)
 {
-       cpu_v7_spectre_init();
+       cpu_v7_spectre_v2_init();
+       cpu_v7_spectre_bhb_init();
 }
index c963162..f979ec4 100644 (file)
@@ -672,6 +672,7 @@ config ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 
 config ARM64_ERRATUM_2051678
        bool "Cortex-A510: 2051678: disable Hardware Update of the page table dirty bit"
+       default y
        help
          This options adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678.
          Affected Coretex-A510 might not respect the ordering rules for
index 7d5d588..2169744 100644 (file)
@@ -309,9 +309,6 @@ config ARCH_VISCONTI
        help
          This enables support for Toshiba Visconti SoCs Family.
 
-config ARCH_VULCAN
-       def_bool n
-
 config ARCH_XGENE
        bool "AppliedMicro X-Gene SOC Family"
        help
index 517519e..f84d4b4 100644 (file)
                        no-map;
                };
 
+               /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+               secmon_reserved_bl32: secmon@5300000 {
+                       reg = <0x0 0x05300000 0x0 0x2000000>;
+                       no-map;
+               };
+
                linux,cma {
                        compatible = "shared-dma-pool";
                        reusable;
index d8838dd..4fb31c2 100644 (file)
                regulator-always-on;
        };
 
-       reserved-memory {
-               /* TEE Reserved Memory */
-               bl32_reserved: bl32@5000000 {
-                       reg = <0x0 0x05300000 0x0 0x2000000>;
-                       no-map;
-               };
-       };
-
        sdio_pwrseq: sdio-pwrseq {
                compatible = "mmc-pwrseq-simple";
                reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
index 3e968b2..fd3fa82 100644 (file)
@@ -17,7 +17,7 @@
                rtc1 = &vrtc;
        };
 
-       dioo2133: audio-amplifier-0 {
+       dio2133: audio-amplifier-0 {
                compatible = "simple-audio-amplifier";
                enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
                VCC-supply = <&vcc_5v>;
                audio-widgets = "Line", "Lineout";
                audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>,
                                 <&tdmin_b>, <&tdmin_c>, <&tdmin_lb>,
-                                <&dioo2133>;
+                                <&dio2133>;
                audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
                                "TDMOUT_B IN 1", "FRDDR_B OUT 1",
                                "TDMOUT_B IN 2", "FRDDR_C OUT 1",
index 6b457b2..aa14ea0 100644 (file)
                        no-map;
                };
 
+               /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+               secmon_reserved_bl32: secmon@5300000 {
+                       reg = <0x0 0x05300000 0x0 0x2000000>;
+                       no-map;
+               };
+
                linux,cma {
                        compatible = "shared-dma-pool";
                        reusable;
index 212c6aa..5751c48 100644 (file)
                regulator-min-microvolt = <1800000>;
                regulator-max-microvolt = <3300000>;
 
-               enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
+               enable-gpio = <&gpio_ao GPIOE_2 GPIO_ACTIVE_HIGH>;
                enable-active-high;
                regulator-always-on;
 
index 0bd1e98..ddb1b34 100644 (file)
@@ -48,7 +48,7 @@
                regulator-max-microvolt = <3300000>;
                vin-supply = <&vcc_5v>;
 
-               enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
+               enable-gpio = <&gpio_ao GPIOE_2 GPIO_OPEN_DRAIN>;
                enable-active-high;
                regulator-always-on;
 
index 4274758..a5d79f2 100644 (file)
                regulator-always-on;
        };
 
-       reserved-memory {
-               /* TEE Reserved Memory */
-               bl32_reserved: bl32@5000000 {
-                       reg = <0x0 0x05300000 0x0 0x2000000>;
-                       no-map;
-               };
-       };
-
        sdio_pwrseq: sdio-pwrseq {
                compatible = "mmc-pwrseq-simple";
                reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
index 6288e10..a2635b1 100644 (file)
                         <0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>,
                         <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>;
                /* Standard AXI Translation entries as programmed by EDK2 */
-               dma-ranges = <0x02000000 0x0 0x2c1c0000 0x0 0x2c1c0000 0x0 0x00040000>,
-                            <0x02000000 0x0 0x80000000 0x0 0x80000000 0x0 0x80000000>,
+               dma-ranges = <0x02000000 0x0 0x80000000 0x0 0x80000000 0x0 0x80000000>,
                             <0x43000000 0x8 0x00000000 0x8 0x00000000 0x2 0x00000000>;
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 7>;
index d74e738..c03f4e1 100644 (file)
        };
 };
 
+&ftm_alarm0 {
+       status = "okay";
+};
+
 &gpio1 {
        gpio-line-names =
                "", "", "", "", "", "", "", "",
index f77f90e..0c7a72c 100644 (file)
                                                clocks = <&clk IMX8MM_CLK_VPU_DEC_ROOT>;
                                                assigned-clocks = <&clk IMX8MM_CLK_VPU_BUS>;
                                                assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_800M>;
-                                               resets = <&src IMX8MQ_RESET_VPU_RESET>;
                                        };
 
                                        pgc_vpu_g1: power-domain@7 {
index f3e3418..2d4a472 100644 (file)
        status = "okay";
 
        ports {
-               port@1 {
-                       reg = <1>;
+               port@0 {
+                       reg = <0>;
 
                        mipi1_sensor_ep: endpoint {
                                remote-endpoint = <&camera1_ep>;
index 2df2510..e92ebb6 100644 (file)
                                assigned-clock-rates = <0>, <0>, <0>, <594000000>;
                                status = "disabled";
 
-                               port@0 {
+                               port {
                                        lcdif_mipi_dsi: endpoint {
                                                remote-endpoint = <&mipi_dsi_lcdif_in>;
                                        };
                                        #address-cells = <1>;
                                        #size-cells = <0>;
 
-                                       port@0 {
-                                               reg = <0>;
+                                       port@1 {
+                                               reg = <1>;
 
                                                csi1_mipi_ep: endpoint {
                                                        remote-endpoint = <&csi1_ep>;
                                        #address-cells = <1>;
                                        #size-cells = <0>;
 
-                                       port@0 {
-                                               reg = <0>;
+                                       port@1 {
+                                               reg = <1>;
 
                                                csi2_mipi_ep: endpoint {
                                                        remote-endpoint = <&csi2_ep>;
index a987ff7..09f7364 100644 (file)
 
                        scmi_sensor: protocol@15 {
                                reg = <0x15>;
-                               #thermal-sensor-cells = <0>;
+                               #thermal-sensor-cells = <1>;
                        };
                };
        };
index f27e3c8..ce6d5bd 100644 (file)
@@ -91,7 +91,7 @@
 
        sound {
                compatible = "fsl,imx-audio-tlv320aic32x4";
-               model = "tqm-tlv320aic32";
+               model = "imx-audio-tlv320aic32x4";
                ssi-controller = <&sai3>;
                audio-codec = <&tlv320aic3x04>;
        };
index 0dd2d2e..f4270cf 100644 (file)
                };
 
                usb0: usb@ffb00000 {
-                       compatible = "snps,dwc2";
+                       compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2";
                        reg = <0xffb00000 0x40000>;
                        interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
                        phys = <&usbphy0>;
                };
 
                usb1: usb@ffb40000 {
-                       compatible = "snps,dwc2";
+                       compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2";
                        reg = <0xffb40000 0x40000>;
                        interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
                        phys = <&usbphy0>;
index f972704..56dfbb2 100644 (file)
                clock-names = "pclk", "timer";
        };
 
-       dmac: dmac@ff240000 {
+       dmac: dma-controller@ff240000 {
                compatible = "arm,pl330", "arm,primecell";
                reg = <0x0 0xff240000 0x0 0x4000>;
                interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
index 39db0b8..b822533 100644 (file)
                status = "disabled";
        };
 
-       dmac: dmac@ff1f0000 {
+       dmac: dma-controller@ff1f0000 {
                compatible = "arm,pl330", "arm,primecell";
                reg = <0x0 0xff1f0000 0x0 0x4000>;
                interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
index 45a5ae5..162f08b 100644 (file)
 
        sound: sound {
                compatible = "rockchip,rk3399-gru-sound";
-               rockchip,cpu = <&i2s0 &i2s2>;
+               rockchip,cpu = <&i2s0 &spdif>;
        };
 };
 
@@ -437,10 +437,6 @@ ap_i2c_audio: &i2c8 {
        status = "okay";
 };
 
-&i2s2 {
-       status = "okay";
-};
-
 &io_domains {
        status = "okay";
 
@@ -537,6 +533,17 @@ ap_i2c_audio: &i2c8 {
        vqmmc-supply = <&ppvar_sd_card_io>;
 };
 
+&spdif {
+       status = "okay";
+
+       /*
+        * SPDIF is routed internally to DP; we either don't use these pins, or
+        * mux them to something else.
+        */
+       /delete-property/ pinctrl-0;
+       /delete-property/ pinctrl-names;
+};
+
 &spi1 {
        status = "okay";
 
index 292bb7e..3ae5d72 100644 (file)
 
 &usbdrd_dwc3_0 {
        dr_mode = "otg";
+       extcon = <&extcon_usb3>;
        status = "okay";
 };
 
index fb67db4..08fa003 100644 (file)
                };
        };
 
+       extcon_usb3: extcon-usb3 {
+               compatible = "linux,extcon-usb-gpio";
+               id-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_HIGH>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&usb3_id>;
+       };
+
        clkin_gmac: external-gmac-clock {
                compatible = "fixed-clock";
                clock-frequency = <125000000>;
                          <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
                };
        };
+
+       usb3 {
+               usb3_id: usb3-id {
+                       rockchip,pins =
+                         <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+               };
+       };
 };
 
 &sdhci {
+       /*
+        * Signal integrity isn't great at 200MHz but 100MHz has proven stable
+        * enough.
+        */
+       max-frequency = <100000000>;
+
        bus-width = <8>;
        mmc-hs400-1_8v;
        mmc-hs400-enhanced-strobe;
index d3cdf6f..080457a 100644 (file)
                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
                clocks = <&cru PCLK_HDMI_CTRL>,
                         <&cru SCLK_HDMI_SFR>,
-                        <&cru PLL_VPLL>,
+                        <&cru SCLK_HDMI_CEC>,
                         <&cru PCLK_VIO_GRF>,
-                        <&cru SCLK_HDMI_CEC>;
-               clock-names = "iahb", "isfr", "vpll", "grf", "cec";
+                        <&cru PLL_VPLL>;
+               clock-names = "iahb", "isfr", "cec", "grf", "vpll";
                power-domains = <&power RK3399_PD_HDCP>;
                reg-io-width = <4>;
                rockchip,grf = <&grf>;
index 166399b..d9eb92d 100644 (file)
                        vcc_ddr: DCDC_REG3 {
                                regulator-always-on;
                                regulator-boot-on;
-                               regulator-min-microvolt = <1100000>;
-                               regulator-max-microvolt = <1100000>;
                                regulator-initial-mode = <0x2>;
                                regulator-name = "vcc_ddr";
                                regulator-state-mem {
index 2fd313a..d91df1c 100644 (file)
                clocks = <&cru SCLK_GMAC0>, <&cru SCLK_GMAC0_RX_TX>,
                         <&cru SCLK_GMAC0_RX_TX>, <&cru CLK_MAC0_REFOUT>,
                         <&cru ACLK_GMAC0>, <&cru PCLK_GMAC0>,
-                        <&cru SCLK_GMAC0_RX_TX>, <&cru CLK_GMAC0_PTP_REF>,
-                        <&cru PCLK_XPCS>;
+                        <&cru SCLK_GMAC0_RX_TX>, <&cru CLK_GMAC0_PTP_REF>;
                clock-names = "stmmaceth", "mac_clk_rx",
                              "mac_clk_tx", "clk_mac_refout",
                              "aclk_mac", "pclk_mac",
-                             "clk_mac_speed", "ptp_ref",
-                             "pclk_xpcs";
+                             "clk_mac_speed", "ptp_ref";
                resets = <&cru SRST_A_GMAC0>;
                reset-names = "stmmaceth";
                rockchip,grf = <&grf>;
index a68033a..8ccce54 100644 (file)
                status = "disabled";
        };
 
-       dmac0: dmac@fe530000 {
+       dmac0: dma-controller@fe530000 {
                compatible = "arm,pl330", "arm,primecell";
                reg = <0x0 0xfe530000 0x0 0x4000>;
                interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
                #dma-cells = <1>;
        };
 
-       dmac1: dmac@fe550000 {
+       dmac1: dma-controller@fe550000 {
                compatible = "arm,pl330", "arm,primecell";
                reg = <0x0 0xfe550000 0x0 0x4000>;
                interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
index a5a24f9..b210cc0 100644 (file)
        model = "Texas Instruments J721S2 EVM";
 
        chosen {
-               stdout-path = "serial10:115200n8";
-               bootargs = "console=ttyS10,115200n8 earlycon=ns16550a,mmio32,2880000";
+               stdout-path = "serial2:115200n8";
+               bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,2880000";
+       };
+
+       aliases {
+               serial1 = &mcu_uart0;
+               serial2 = &main_uart8;
+               mmc0 = &main_sdhci0;
+               mmc1 = &main_sdhci1;
+               can0 = &main_mcan16;
+               can1 = &mcu_mcan0;
+               can2 = &mcu_mcan1;
        };
 
        evm_12v0: fixedregulator-evm12v0 {
index 80d3cae..fe5234c 100644 (file)
        #address-cells = <2>;
        #size-cells = <2>;
 
-       aliases {
-               serial0 = &wkup_uart0;
-               serial1 = &mcu_uart0;
-               serial2 = &main_uart0;
-               serial3 = &main_uart1;
-               serial4 = &main_uart2;
-               serial5 = &main_uart3;
-               serial6 = &main_uart4;
-               serial7 = &main_uart5;
-               serial8 = &main_uart6;
-               serial9 = &main_uart7;
-               serial10 = &main_uart8;
-               serial11 = &main_uart9;
-               mmc0 = &main_sdhci0;
-               mmc1 = &main_sdhci1;
-               can0 = &main_mcan16;
-               can1 = &mcu_mcan0;
-               can2 = &mcu_mcan1;
-               can3 = &main_mcan3;
-               can4 = &main_mcan5;
-       };
-
        chosen { };
 
        cpus {
index 3198acb..7f3c87f 100644 (file)
        msr_s   SYS_ICC_SRE_EL2, x0
        isb                                     // Make sure SRE is now set
        mrs_s   x0, SYS_ICC_SRE_EL2             // Read SRE back,
-       tbz     x0, #0, 1f                      // and check that it sticks
+       tbz     x0, #0, .Lskip_gicv3_\@         // and check that it sticks
        msr_s   SYS_ICH_HCR_EL2, xzr            // Reset ICC_HCR_EL2 to defaults
 .Lskip_gicv3_\@:
 .endm
index 14b9726..5918095 100644 (file)
@@ -46,8 +46,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
         * specification (ARM DEN 0022A). This means all suspend states
         * for KVM will preserve the register state.
         */
-       kvm_vcpu_halt(vcpu);
-       kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+       kvm_vcpu_wfi(vcpu);
 
        return PSCI_RET_SUCCESS;
 }
index 7068da0..49837d3 100644 (file)
@@ -248,6 +248,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                                    IRQCHIP_STATE_PENDING,
                                                    &val);
                        WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+               } else if (vgic_irq_is_mapped_level(irq)) {
+                       val = vgic_get_phys_line_level(irq);
                } else {
                        val = irq_is_pending(irq);
                }
index 3e336b3..ab6e3dc 100644 (file)
@@ -83,6 +83,8 @@
                label = "HDMI OUT";
                type = "a";
 
+               ddc-en-gpios = <&gpa 25 GPIO_ACTIVE_HIGH>;
+
                port {
                        hdmi_con: endpoint {
                                remote-endpoint = <&dw_hdmi_out>;
                gpio = <&gpf 14 GPIO_ACTIVE_LOW>;
                enable-active-high;
        };
-
-       hdmi_power: fixedregulator@3 {
-               compatible = "regulator-fixed";
-
-               regulator-name = "hdmi_power";
-               regulator-min-microvolt = <5000000>;
-               regulator-max-microvolt = <5000000>;
-
-               gpio = <&gpa 25 0>;
-               enable-active-high;
-       };
 };
 
 &ext {
        pinctrl-names = "default";
        pinctrl-0 = <&pins_hdmi_ddc>;
 
-       hdmi-5v-supply = <&hdmi_power>;
-
        ports {
                #address-cells = <1>;
                #size-cells = <0>;
index f979adf..ef73ba1 100644 (file)
@@ -803,7 +803,7 @@ early_param("coherentio", setcoherentio);
 
 static int __init setnocoherentio(char *str)
 {
-       dma_default_coherent = true;
+       dma_default_coherent = false;
        pr_info("Software DMA cache coherency (command line)\n");
        return 0;
 }
index d542fb7..1986d13 100644 (file)
@@ -351,6 +351,9 @@ asmlinkage void start_secondary(void)
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;
 
+       set_cpu_sibling_map(cpu);
+       set_cpu_core_map(cpu);
+
        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);
 
@@ -362,9 +365,6 @@ asmlinkage void start_secondary(void)
        /* The CPU is running and counters synchronised, now mark it online */
        set_cpu_online(cpu, true);
 
-       set_cpu_sibling_map(cpu);
-       set_cpu_core_map(cpu);
-
        calculate_cpu_foreign_map();
 
        /*
index d6efffd..fb0565b 100644 (file)
@@ -22,7 +22,9 @@
 
 #include "common.h"
 
-static void *detect_magic __initdata = detect_memory_region;
+#define MT7621_MEM_TEST_PATTERN         0xaa5555aa
+
+static u32 detect_magic __initdata;
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
@@ -58,24 +60,32 @@ phys_addr_t mips_cpc_default_phys_base(void)
        panic("Cannot detect cpc address");
 }
 
+static bool __init mt7621_addr_wraparound_test(phys_addr_t size)
+{
+       void *dm = (void *)KSEG1ADDR(&detect_magic);
+
+       if (CPHYSADDR(dm + size) >= MT7621_LOWMEM_MAX_SIZE)
+               return true;
+       __raw_writel(MT7621_MEM_TEST_PATTERN, dm);
+       if (__raw_readl(dm) != __raw_readl(dm + size))
+               return false;
+       __raw_writel(~MT7621_MEM_TEST_PATTERN, dm);
+       return __raw_readl(dm) == __raw_readl(dm + size);
+}
+
 static void __init mt7621_memory_detect(void)
 {
-       void *dm = &detect_magic;
        phys_addr_t size;
 
-       for (size = 32 * SZ_1M; size < 256 * SZ_1M; size <<= 1) {
-               if (!__builtin_memcmp(dm, dm + size, sizeof(detect_magic)))
-                       break;
+       for (size = 32 * SZ_1M; size <= 256 * SZ_1M; size <<= 1) {
+               if (mt7621_addr_wraparound_test(size)) {
+                       memblock_add(MT7621_LOWMEM_BASE, size);
+                       return;
+               }
        }
 
-       if ((size == 256 * SZ_1M) &&
-           (CPHYSADDR(dm + size) < MT7621_LOWMEM_MAX_SIZE) &&
-           __builtin_memcmp(dm, dm + size, sizeof(detect_magic))) {
-               memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE);
-               memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE);
-       } else {
-               memblock_add(MT7621_LOWMEM_BASE, size);
-       }
+       memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE);
+       memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE);
 }
 
 void __init ralink_of_remap(void)
index 0ec9cfc..56ffd26 100644 (file)
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 
+/* compiler build environment sanity checks: */
+#if !defined(CONFIG_64BIT) && defined(__LP64__)
+#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
+#endif
+#if defined(CONFIG_64BIT) && !defined(__LP64__)
+#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
+#endif
+
 /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
  * on use of volatile and __*_bit() (set/clear/change):
  *     *_bit() want use of volatile.
index ebf8a84..123d5f1 100644 (file)
@@ -89,8 +89,8 @@ struct exception_table_entry {
        __asm__("1: " ldx " 0(" sr "%2),%0\n"           \
                "9:\n"                                  \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
-               : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err));             \
+               : "=r"(__gu_val), "+r"(__gu_err)        \
+               : "r"(ptr));                            \
                                                        \
        (val) = (__force __typeof__(*(ptr))) __gu_val;  \
 }
@@ -123,8 +123,8 @@ struct exception_table_entry {
                "9:\n"                                  \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
-               : "=&r"(__gu_tmp.l), "=r"(__gu_err)     \
-               : "r"(ptr), "1"(__gu_err));             \
+               : "=&r"(__gu_tmp.l), "+r"(__gu_err)     \
+               : "r"(ptr));                            \
                                                        \
        (val) = __gu_tmp.t;                             \
 }
@@ -135,13 +135,12 @@ struct exception_table_entry {
 #define __put_user_internal(sr, x, ptr)                                \
 ({                                                             \
        ASM_EXCEPTIONTABLE_VAR(__pu_err);                       \
-        __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);      \
                                                                \
        switch (sizeof(*(ptr))) {                               \
-       case 1: __put_user_asm(sr, "stb", __x, ptr); break;     \
-       case 2: __put_user_asm(sr, "sth", __x, ptr); break;     \
-       case 4: __put_user_asm(sr, "stw", __x, ptr); break;     \
-       case 8: STD_USER(sr, __x, ptr); break;                  \
+       case 1: __put_user_asm(sr, "stb", x, ptr); break;       \
+       case 2: __put_user_asm(sr, "sth", x, ptr); break;       \
+       case 4: __put_user_asm(sr, "stw", x, ptr); break;       \
+       case 8: STD_USER(sr, x, ptr); break;                    \
        default: BUILD_BUG();                                   \
        }                                                       \
                                                                \
@@ -150,7 +149,9 @@ struct exception_table_entry {
 
 #define __put_user(x, ptr)                                     \
 ({                                                             \
-       __put_user_internal("%%sr3,", x, ptr);                  \
+       __typeof__(&*(ptr)) __ptr = ptr;                        \
+       __typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x);   \
+       __put_user_internal("%%sr3,", __x, __ptr);              \
 })
 
 #define __put_kernel_nofault(dst, src, type, err_label)                \
@@ -180,8 +181,8 @@ struct exception_table_entry {
                "1: " stx " %2,0(" sr "%1)\n"                   \
                "9:\n"                                          \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)         \
-               : "=r"(__pu_err)                                \
-               : "r"(ptr), "r"(x), "0"(__pu_err))
+               : "+r"(__pu_err)                                \
+               : "r"(ptr), "r"(x))
 
 
 #if !defined(CONFIG_64BIT)
@@ -193,8 +194,8 @@ struct exception_table_entry {
                "9:\n"                                          \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)         \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)         \
-               : "=r"(__pu_err)                                \
-               : "r"(ptr), "r"(__val), "0"(__pu_err));         \
+               : "+r"(__pu_err)                                \
+               : "r"(ptr), "r"(__val));                        \
 } while (0)
 
 #endif /* !defined(CONFIG_64BIT) */
index 237d20d..286cec4 100644 (file)
@@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
        : "r" (val), "r" (regs->ior), "r" (regs->isr)
        : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
 
-       return 0;
+       return ret;
 }
 static int emulate_std(struct pt_regs *regs, int frreg, int flop)
 {
@@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
        __asm__ __volatile__ (
 "      mtsp    %4, %%sr1\n"
 "      zdep    %2, 29, 2, %%r19\n"
-"      dep     %%r0, 31, 2, %2\n"
+"      dep     %%r0, 31, 2, %3\n"
 "      mtsar   %%r19\n"
 "      zvdepi  -2, 32, %%r19\n"
 "1:    ldw     0(%%sr1,%3),%%r20\n"
@@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
 "      andcm   %%r21, %%r19, %%r21\n"
 "      or      %1, %%r20, %1\n"
 "      or      %2, %%r21, %2\n"
-"3:    stw     %1,0(%%sr1,%1)\n"
+"3:    stw     %1,0(%%sr1,%3)\n"
 "4:    stw     %%r1,4(%%sr1,%3)\n"
 "5:    stw     %2,8(%%sr1,%3)\n"
 "      copy    %%r0, %0\n"
@@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs)
                ret = ERR_NOTHANDLED;   /* "undefined", but lets kill them. */
                break;
        }
-#ifdef CONFIG_PA20
        switch (regs->iir & OPCODE2_MASK)
        {
        case OPCODE_FLDD_L:
@@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs)
                flop=1;
                ret = emulate_std(regs, R2(regs->iir),1);
                break;
+#ifdef CONFIG_PA20
        case OPCODE_LDD_L:
                ret = emulate_ldd(regs, R2(regs->iir),0);
                break;
        case OPCODE_STD_L:
                ret = emulate_std(regs, R2(regs->iir),0);
                break;
-       }
 #endif
+       }
        switch (regs->iir & OPCODE3_MASK)
        {
        case OPCODE_FLDW_L:
                flop=1;
-               ret = emulate_ldw(regs, R2(regs->iir),0);
+               ret = emulate_ldw(regs, R2(regs->iir), 1);
                break;
        case OPCODE_LDW_M:
-               ret = emulate_ldw(regs, R2(regs->iir),1);
+               ret = emulate_ldw(regs, R2(regs->iir), 0);
                break;
 
        case OPCODE_FSTW_L:
index 367f639..8603850 100644 (file)
@@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr)
        return *((u64 *)addr);
 }
 
+u64 ioread64_lo_hi(const void __iomem *addr)
+{
+       u32 low, high;
+
+       low = ioread32(addr);
+       high = ioread32(addr + sizeof(u32));
+
+       return low + ((u64)high << 32);
+}
+
 u64 ioread64_hi_lo(const void __iomem *addr)
 {
        u32 low, high;
@@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr)
        }
 }
 
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+       iowrite32(val, addr);
+       iowrite32(val >> 32, addr + sizeof(u32));
+}
+
 void iowrite64_hi_lo(u64 val, void __iomem *addr)
 {
        iowrite32(val >> 32, addr + sizeof(u32));
@@ -530,6 +546,7 @@ EXPORT_SYMBOL(ioread32);
 EXPORT_SYMBOL(ioread32be);
 EXPORT_SYMBOL(ioread64);
 EXPORT_SYMBOL(ioread64be);
+EXPORT_SYMBOL(ioread64_lo_hi);
 EXPORT_SYMBOL(ioread64_hi_lo);
 EXPORT_SYMBOL(iowrite8);
 EXPORT_SYMBOL(iowrite16);
@@ -538,6 +555,7 @@ EXPORT_SYMBOL(iowrite32);
 EXPORT_SYMBOL(iowrite32be);
 EXPORT_SYMBOL(iowrite64);
 EXPORT_SYMBOL(iowrite64be);
+EXPORT_SYMBOL(iowrite64_lo_hi);
 EXPORT_SYMBOL(iowrite64_hi_lo);
 EXPORT_SYMBOL(ioread8_rep);
 EXPORT_SYMBOL(ioread16_rep);
index 1ae31db..1dc2e88 100644 (file)
@@ -337,9 +337,9 @@ static void __init setup_bootmem(void)
 
 static bool kernel_set_to_readonly;
 
-static void __init map_pages(unsigned long start_vaddr,
-                            unsigned long start_paddr, unsigned long size,
-                            pgprot_t pgprot, int force)
+static void __ref map_pages(unsigned long start_vaddr,
+                           unsigned long start_paddr, unsigned long size,
+                           pgprot_t pgprot, int force)
 {
        pmd_t *pmd;
        pte_t *pg_table;
@@ -449,7 +449,7 @@ void __init set_kernel_text_rw(int enable_read_write)
        flush_tlb_all();
 }
 
-void __ref free_initmem(void)
+void free_initmem(void)
 {
        unsigned long init_begin = (unsigned long)__init_begin;
        unsigned long init_end = (unsigned long)__init_end;
@@ -463,7 +463,6 @@ void __ref free_initmem(void)
        /* The init text pages are marked R-X.  We have to
         * flush the icache and mark them RW-
         *
-        * This is tricky, because map_pages is in the init section.
         * Do a dummy remap of the data section first (the data
         * section is already PAGE_KERNEL) to pull in the TLB entries
         * for map_kernel */
index ba5b1be..006cbec 100644 (file)
@@ -202,7 +202,6 @@ static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
 /*
  * The current system page and segment sizes
  */
-extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
 extern int mmu_vmalloc_psize;
 extern int mmu_io_psize;
@@ -213,6 +212,7 @@ extern int mmu_io_psize;
 #define mmu_virtual_psize MMU_PAGE_4K
 #endif
 #endif
+extern int mmu_linear_psize;
 extern int mmu_vmemmap_psize;
 
 /* MMU initialization */
index 7a90000..f83866a 100644 (file)
@@ -9,7 +9,7 @@ struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges);
 int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size);
 int add_tce_mem_ranges(struct crash_mem **mem_ranges);
 int add_initrd_mem_range(struct crash_mem **mem_ranges);
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
 int add_htab_mem_range(struct crash_mem **mem_ranges);
 #else
 static inline int add_htab_mem_range(struct crash_mem **mem_ranges)
index fa84744..b876ef8 100644 (file)
@@ -421,14 +421,14 @@ InstructionTLBMiss:
  */
        /* Get PTE (linux-style) and check access */
        mfspr   r3,SPRN_IMISS
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
        lis     r1, TASK_SIZE@h         /* check if kernel address */
        cmplw   0,r1,r3
 #endif
        mfspr   r2, SPRN_SDR1
        li      r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
        rlwinm  r2, r2, 28, 0xfffff000
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
        bgt-    112f
        lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
        li      r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
index a94b0cd..bd3734d 100644 (file)
@@ -3264,12 +3264,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
                case BARRIER_EIEIO:
                        eieio();
                        break;
+#ifdef CONFIG_PPC64
                case BARRIER_LWSYNC:
                        asm volatile("lwsync" : : : "memory");
                        break;
                case BARRIER_PTESYNC:
                        asm volatile("ptesync" : : : "memory");
                        break;
+#endif
                }
                break;
 
index 8a107ed..7d81102 100644 (file)
@@ -50,6 +50,12 @@ riscv-march-$(CONFIG_ARCH_RV32I)     := rv32ima
 riscv-march-$(CONFIG_ARCH_RV64I)       := rv64ima
 riscv-march-$(CONFIG_FPU)              := $(riscv-march-y)fd
 riscv-march-$(CONFIG_RISCV_ISA_C)      := $(riscv-march-y)c
+
+# Newer binutils versions default to ISA spec version 20191213 which moves some
+# instructions from the I extension to the Zicsr and Zifencei extensions.
+toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
+riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
+
 KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
 KBUILD_AFLAGS += -march=$(riscv-march-y)
 
index 56f5711..44d3385 100644 (file)
                        compatible = "canaan,k210-plic", "sifive,plic-1.0.0";
                        reg = <0xC000000 0x4000000>;
                        interrupt-controller;
-                       interrupts-extended = <&cpu0_intc 11>, <&cpu1_intc 11>;
+                       interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
+                                             <&cpu1_intc 11>, <&cpu1_intc 9>;
                        riscv,ndev = <65>;
                };
 
index 2a82a3b..af64b95 100644 (file)
@@ -23,7 +23,7 @@ CONFIG_SLOB=y
 CONFIG_SOC_CANAAN=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
-CONFIG_CMDLINE="earlycon console=ttySIF0 rootdelay=2 root=/dev/mmcblk0p1 ro"
+CONFIG_CMDLINE="earlycon console=ttySIF0 root=/dev/mmcblk0p1 rootwait ro"
 CONFIG_CMDLINE_FORCE=y
 # CONFIG_SECCOMP is not set
 # CONFIG_STACKPROTECTOR is not set
index 160e3a1..004372f 100644 (file)
@@ -119,7 +119,7 @@ extern phys_addr_t phys_ram_base;
        ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
 
 #define is_linear_mapping(x)   \
-       ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < kernel_map.virt_addr))
+       ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
 
 #define linear_mapping_pa_to_va(x)     ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
 #define kernel_mapping_pa_to_va(y)     ({                                              \
index 7e949f2..e3549e5 100644 (file)
@@ -13,6 +13,7 @@
 
 #ifndef CONFIG_MMU
 #define KERNEL_LINK_ADDR       PAGE_OFFSET
+#define KERN_VIRT_SIZE         (UL(-1))
 #else
 
 #define ADDRESS_SPACE_END      (UL(-1))
index 612556f..ffc87e7 100644 (file)
@@ -51,6 +51,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
 obj-$(CONFIG_FUNCTION_TRACER)  += mcount.o ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += mcount-dyn.o
 
+obj-$(CONFIG_TRACE_IRQFLAGS)   += trace_irq.o
+
 obj-$(CONFIG_RISCV_BASE_PMU)   += perf_event.o
 obj-$(CONFIG_PERF_EVENTS)      += perf_callchain.o
 obj-$(CONFIG_HAVE_PERF_REGS)   += perf_regs.o
index be7f05b..f7a832e 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/sched/hotplug.h>
 #include <asm/irq.h>
 #include <asm/cpu_ops.h>
+#include <asm/numa.h>
 #include <asm/sbi.h>
 
 bool cpu_has_hotplug(unsigned int cpu)
@@ -40,6 +41,7 @@ int __cpu_disable(void)
                return ret;
 
        remove_cpu_topology(cpu);
+       numa_remove_cpu(cpu);
        set_cpu_online(cpu, false);
        irq_migrate_all_off_this_cpu();
 
index ed29e9c..d6a46ed 100644 (file)
@@ -108,7 +108,7 @@ _save_context:
 .option pop
 
 #ifdef CONFIG_TRACE_IRQFLAGS
-       call trace_hardirqs_off
+       call __trace_hardirqs_off
 #endif
 
 #ifdef CONFIG_CONTEXT_TRACKING
@@ -143,7 +143,7 @@ skip_context_tracking:
        li t0, EXC_BREAKPOINT
        beq s4, t0, 1f
 #ifdef CONFIG_TRACE_IRQFLAGS
-       call trace_hardirqs_on
+       call __trace_hardirqs_on
 #endif
        csrs CSR_STATUS, SR_IE
 
@@ -234,7 +234,7 @@ ret_from_exception:
        REG_L s0, PT_STATUS(sp)
        csrc CSR_STATUS, SR_IE
 #ifdef CONFIG_TRACE_IRQFLAGS
-       call trace_hardirqs_off
+       call __trace_hardirqs_off
 #endif
 #ifdef CONFIG_RISCV_M_MODE
        /* the MPP value is too large to be used as an immediate arg for addi */
@@ -270,10 +270,10 @@ restore_all:
        REG_L s1, PT_STATUS(sp)
        andi t0, s1, SR_PIE
        beqz t0, 1f
-       call trace_hardirqs_on
+       call __trace_hardirqs_on
        j 2f
 1:
-       call trace_hardirqs_off
+       call __trace_hardirqs_off
 2:
 #endif
        REG_L a0, PT_STATUS(sp)
index 2363b43..ec07f99 100644 (file)
        add \reg, \reg, t0
 .endm
 .macro XIP_FIXUP_FLASH_OFFSET reg
-       la t1, __data_loc
-       li t0, XIP_OFFSET_MASK
-       and t1, t1, t0
-       li t1, XIP_OFFSET
-       sub t0, t0, t1
-       sub \reg, \reg, t0
+       la t0, __data_loc
+       REG_L t1, _xip_phys_offset
+       sub \reg, \reg, t1
+       add \reg, \reg, t0
 .endm
 _xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
 #else
 .macro XIP_FIXUP_OFFSET reg
 .endm
index f72527f..775d332 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (c) 2020 Western Digital Corporation or its affiliates.
  */
 
+#include <linux/bits.h>
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/reboot.h>
@@ -85,7 +86,7 @@ static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mas
                        pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n");
                        break;
                }
-               hmask |= 1 << hartid;
+               hmask |= BIT(hartid);
        }
 
        return hmask;
@@ -160,7 +161,7 @@ static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
 {
        unsigned long hart_mask;
 
-       if (!cpu_mask)
+       if (!cpu_mask || cpumask_empty(cpu_mask))
                cpu_mask = cpu_online_mask;
        hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
 
@@ -176,7 +177,7 @@ static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
        int result = 0;
        unsigned long hart_mask;
 
-       if (!cpu_mask)
+       if (!cpu_mask || cpumask_empty(cpu_mask))
                cpu_mask = cpu_online_mask;
        hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
 
@@ -249,26 +250,37 @@ static void __sbi_set_timer_v02(uint64_t stime_value)
 
 static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
 {
-       unsigned long hartid, cpuid, hmask = 0, hbase = 0;
+       unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
        struct sbiret ret = {0};
        int result;
 
-       if (!cpu_mask)
+       if (!cpu_mask || cpumask_empty(cpu_mask))
                cpu_mask = cpu_online_mask;
 
        for_each_cpu(cpuid, cpu_mask) {
                hartid = cpuid_to_hartid_map(cpuid);
-               if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
-                       ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
-                                       hmask, hbase, 0, 0, 0, 0);
-                       if (ret.error)
-                               goto ecall_failed;
-                       hmask = 0;
-                       hbase = 0;
+               if (hmask) {
+                       if (hartid + BITS_PER_LONG <= htop ||
+                           hbase + BITS_PER_LONG <= hartid) {
+                               ret = sbi_ecall(SBI_EXT_IPI,
+                                               SBI_EXT_IPI_SEND_IPI, hmask,
+                                               hbase, 0, 0, 0, 0);
+                               if (ret.error)
+                                       goto ecall_failed;
+                               hmask = 0;
+                       } else if (hartid < hbase) {
+                               /* shift the mask to fit lower hartid */
+                               hmask <<= hbase - hartid;
+                               hbase = hartid;
+                       }
                }
-               if (!hmask)
+               if (!hmask) {
                        hbase = hartid;
-               hmask |= 1UL << (hartid - hbase);
+                       htop = hartid;
+               } else if (hartid > htop) {
+                       htop = hartid;
+               }
+               hmask |= BIT(hartid - hbase);
        }
 
        if (hmask) {
@@ -344,25 +356,35 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
                            unsigned long start, unsigned long size,
                            unsigned long arg4, unsigned long arg5)
 {
-       unsigned long hartid, cpuid, hmask = 0, hbase = 0;
+       unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
        int result;
 
-       if (!cpu_mask)
+       if (!cpu_mask || cpumask_empty(cpu_mask))
                cpu_mask = cpu_online_mask;
 
        for_each_cpu(cpuid, cpu_mask) {
                hartid = cpuid_to_hartid_map(cpuid);
-               if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
-                       result = __sbi_rfence_v02_call(fid, hmask, hbase,
-                                                      start, size, arg4, arg5);
-                       if (result)
-                               return result;
-                       hmask = 0;
-                       hbase = 0;
+               if (hmask) {
+                       if (hartid + BITS_PER_LONG <= htop ||
+                           hbase + BITS_PER_LONG <= hartid) {
+                               result = __sbi_rfence_v02_call(fid, hmask,
+                                               hbase, start, size, arg4, arg5);
+                               if (result)
+                                       return result;
+                               hmask = 0;
+                       } else if (hartid < hbase) {
+                               /* shift the mask to fit lower hartid */
+                               hmask <<= hbase - hartid;
+                               hbase = hartid;
+                       }
                }
-               if (!hmask)
+               if (!hmask) {
                        hbase = hartid;
-               hmask |= 1UL << (hartid - hbase);
+                       htop = hartid;
+               } else if (hartid > htop) {
+                       htop = hartid;
+               }
+               hmask |= BIT(hartid - hbase);
        }
 
        if (hmask) {
index 201ee20..14d2b53 100644 (file)
@@ -22,15 +22,16 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                             bool (*fn)(void *, unsigned long), void *arg)
 {
        unsigned long fp, sp, pc;
+       int level = 0;
 
        if (regs) {
                fp = frame_pointer(regs);
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
-               fp = (unsigned long)__builtin_frame_address(1);
-               sp = (unsigned long)__builtin_frame_address(0);
-               pc = (unsigned long)__builtin_return_address(0);
+               fp = (unsigned long)__builtin_frame_address(0);
+               sp = sp_in_global;
+               pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
                fp = task->thread.s[0];
@@ -42,7 +43,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                unsigned long low, high;
                struct stackframe *frame;
 
-               if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+               if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
                        break;
 
                /* Validate frame pointer */
diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c
new file mode 100644 (file)
index 0000000..095ac97
--- /dev/null
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Changbin Du <changbin.du@gmail.com>
+ */
+
+#include <linux/irqflags.h>
+#include <linux/kprobes.h>
+#include "trace_irq.h"
+
+/*
+ * trace_hardirqs_on/off require the caller to setup frame pointer properly.
+ * Otherwise, CALLER_ADDR1 might trigger a paging exception in the kernel.
+ * Here we add one extra level so they can be safely called by low-level
+ * entry code in which $fp is used for other purposes.
+ */
+
+void __trace_hardirqs_on(void)
+{
+       trace_hardirqs_on();
+}
+NOKPROBE_SYMBOL(__trace_hardirqs_on);
+
+void __trace_hardirqs_off(void)
+{
+       trace_hardirqs_off();
+}
+NOKPROBE_SYMBOL(__trace_hardirqs_off);
diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h
new file mode 100644 (file)
index 0000000..99fe673
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Changbin Du <changbin.du@gmail.com>
+ */
+#ifndef __TRACE_IRQ_H
+#define __TRACE_IRQ_H
+
+void __trace_hardirqs_on(void);
+void __trace_hardirqs_off(void);
+
+#endif /* __TRACE_IRQ_H */
index 7ebaef1..ac7a252 100644 (file)
@@ -24,6 +24,9 @@ obj-$(CONFIG_KASAN)   += kasan_init.o
 ifdef CONFIG_KASAN
 KASAN_SANITIZE_kasan_init.o := n
 KASAN_SANITIZE_init.o := n
+ifdef CONFIG_DEBUG_VIRTUAL
+KASAN_SANITIZE_physaddr.o := n
+endif
 endif
 
 obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
index 05978f7..35484d8 100644 (file)
@@ -33,7 +33,7 @@ static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
        if (unlikely(offset > MAX_REG_OFFSET))
                return;
 
-       if (!offset)
+       if (offset)
                *(unsigned long *)((unsigned long)regs + offset) = val;
 }
 
@@ -43,8 +43,8 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
        int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
        int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
 
-       regs_set_gpr(regs, reg_err, -EFAULT);
-       regs_set_gpr(regs, reg_zero, 0);
+       regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
+       regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
 
        regs->epc = get_ex_fixup(ex);
        return true;
index cf4d018..0d58803 100644 (file)
@@ -125,7 +125,6 @@ void __init mem_init(void)
        else
                swiotlb_force = SWIOTLB_NO_FORCE;
 #endif
-       high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
        memblock_free_all();
 
        print_vm_layout();
@@ -195,6 +194,7 @@ static void __init setup_bootmem(void)
 
        min_low_pfn = PFN_UP(phys_ram_base);
        max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
+       high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
 
        dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
        set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
@@ -232,6 +232,7 @@ static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAG
 
 #ifdef CONFIG_XIP_KERNEL
 #define pt_ops                 (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
+#define riscv_pfn_base         (*(unsigned long  *)XIP_FIXUP(&riscv_pfn_base))
 #define trampoline_pg_dir      ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
 #define fixmap_pte             ((pte_t *)XIP_FIXUP(fixmap_pte))
 #define early_pg_dir           ((pgd_t *)XIP_FIXUP(early_pg_dir))
@@ -522,6 +523,7 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
 }
 
 #ifdef CONFIG_XIP_KERNEL
+#define phys_ram_base  (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
 extern char _xiprom[], _exiprom[], __data_loc;
 
 /* called from head.S with MMU off */
index f61f7ca..cd1a145 100644 (file)
@@ -113,8 +113,11 @@ static void __init kasan_populate_pud(pgd_t *pgd,
                base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
        } else {
                base_pud = (pud_t *)pgd_page_vaddr(*pgd);
-               if (base_pud == lm_alias(kasan_early_shadow_pud))
+               if (base_pud == lm_alias(kasan_early_shadow_pud)) {
                        base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+                       memcpy(base_pud, (void *)kasan_early_shadow_pud,
+                              sizeof(pud_t) * PTRS_PER_PUD);
+               }
        }
 
        pudp = base_pud + pud_index(vaddr);
@@ -202,8 +205,7 @@ asmlinkage void __init kasan_early_init(void)
 
        for (i = 0; i < PTRS_PER_PTE; ++i)
                set_pte(kasan_early_shadow_pte + i,
-                       mk_pte(virt_to_page(kasan_early_shadow_page),
-                              PAGE_KERNEL));
+                       pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));
 
        for (i = 0; i < PTRS_PER_PMD; ++i)
                set_pmd(kasan_early_shadow_pmd + i,
index e7fd0c2..19cf25a 100644 (file)
@@ -8,12 +8,10 @@
 
 phys_addr_t __virt_to_phys(unsigned long x)
 {
-       phys_addr_t y = x - PAGE_OFFSET;
-
        /*
         * Boundary checking aginst the kernel linear mapping space.
         */
-       WARN(y >= KERN_VIRT_SIZE,
+       WARN(!is_linear_mapping(x) && !is_kernel_mapping(x),
             "virt_to_phys used for non-linear address: %pK (%pS)\n",
             (void *)x, (void *)x);
 
index 16dc57d..8511f0e 100644 (file)
@@ -69,8 +69,13 @@ static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
 {
        a->fixup = b->fixup + delta;
        b->fixup = tmp.fixup - delta;
-       a->handler = b->handler + delta;
-       b->handler = tmp.handler - delta;
+       a->handler = b->handler;
+       if (a->handler)
+               a->handler += delta;
+       b->handler = tmp.handler;
+       if (b->handler)
+               b->handler -= delta;
 }
+#define swap_ex_entry_fixup swap_ex_entry_fixup
 
 #endif
index 267f70f..6f80ec9 100644 (file)
@@ -47,15 +47,17 @@ struct ftrace_regs {
 
 static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
 {
-       return &fregs->regs;
+       struct pt_regs *regs = &fregs->regs;
+
+       if (test_pt_regs_flag(regs, PIF_FTRACE_FULL_REGS))
+               return regs;
+       return NULL;
 }
 
 static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
                                                           unsigned long ip)
 {
-       struct pt_regs *regs = arch_ftrace_get_regs(fregs);
-
-       regs->psw.addr = ip;
+       fregs->regs.psw.addr = ip;
 }
 
 /*
index 4ffa8e7..ddb70fb 100644 (file)
 #define PIF_EXECVE_PGSTE_RESTART       1       /* restart execve for PGSTE binaries */
 #define PIF_SYSCALL_RET_SET            2       /* return value was set via ptrace */
 #define PIF_GUEST_FAULT                        3       /* indicates program check in sie64a */
+#define PIF_FTRACE_FULL_REGS           4       /* all register contents valid (ftrace) */
 
 #define _PIF_SYSCALL                   BIT(PIF_SYSCALL)
 #define _PIF_EXECVE_PGSTE_RESTART      BIT(PIF_EXECVE_PGSTE_RESTART)
 #define _PIF_SYSCALL_RET_SET           BIT(PIF_SYSCALL_RET_SET)
 #define _PIF_GUEST_FAULT               BIT(PIF_GUEST_FAULT)
+#define _PIF_FTRACE_FULL_REGS          BIT(PIF_FTRACE_FULL_REGS)
 
 #ifndef __ASSEMBLY__
 
index 21d62d8..89c0870 100644 (file)
@@ -159,9 +159,38 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
        return 0;
 }
 
+static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
+{
+       struct ftrace_hotpatch_trampoline *trampoline;
+       struct ftrace_insn insn;
+       s64 disp;
+       u16 opc;
+
+       if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
+               return ERR_PTR(-EFAULT);
+       disp = (s64)insn.disp * 2;
+       trampoline = (void *)(rec->ip + disp);
+       if (get_kernel_nofault(opc, &trampoline->brasl_opc))
+               return ERR_PTR(-EFAULT);
+       if (opc != 0xc015)
+               return ERR_PTR(-EINVAL);
+       return trampoline;
+}
+
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
 {
+       struct ftrace_hotpatch_trampoline *trampoline;
+       u64 old;
+
+       trampoline = ftrace_get_trampoline(rec);
+       if (IS_ERR(trampoline))
+               return PTR_ERR(trampoline);
+       if (get_kernel_nofault(old, &trampoline->interceptor))
+               return -EFAULT;
+       if (old != old_addr)
+               return -EINVAL;
+       s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
        return 0;
 }
 
@@ -188,6 +217,12 @@ static void brcl_enable(void *brcl)
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
+       struct ftrace_hotpatch_trampoline *trampoline;
+
+       trampoline = ftrace_get_trampoline(rec);
+       if (IS_ERR(trampoline))
+               return PTR_ERR(trampoline);
+       s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
        brcl_enable((void *)rec->ip);
        return 0;
 }
@@ -291,7 +326,7 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 
        regs = ftrace_get_regs(fregs);
        p = get_kprobe((kprobe_opcode_t *)ip);
-       if (unlikely(!p) || kprobe_disabled(p))
+       if (!regs || unlikely(!p) || kprobe_disabled(p))
                goto out;
 
        if (kprobe_running()) {
index 39bcc0e..a24177d 100644 (file)
@@ -27,6 +27,7 @@ ENDPROC(ftrace_stub)
 #define STACK_PTREGS_GPRS      (STACK_PTREGS + __PT_GPRS)
 #define STACK_PTREGS_PSW       (STACK_PTREGS + __PT_PSW)
 #define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
+#define STACK_PTREGS_FLAGS     (STACK_PTREGS + __PT_FLAGS)
 #ifdef __PACK_STACK
 /* allocate just enough for r14, r15 and backchain */
 #define TRACED_FUNC_FRAME_SIZE 24
@@ -57,6 +58,14 @@ ENDPROC(ftrace_stub)
        .if \allregs == 1
        stg     %r14,(STACK_PTREGS_PSW)(%r15)
        stosm   (STACK_PTREGS_PSW)(%r15),0
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+       mvghi   STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
+#else
+       lghi    %r14,_PIF_FTRACE_FULL_REGS
+       stg     %r14,STACK_PTREGS_FLAGS(%r15)
+#endif
+       .else
+       xc      STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
        .endif
 
        lg      %r14,(__SF_GPRS+8*8)(%r1)       # restore original return address
index f2c25d1..05327be 100644 (file)
@@ -800,6 +800,8 @@ static void __init check_initrd(void)
 static void __init reserve_kernel(void)
 {
        memblock_reserve(0, STARTUP_NORMAL_OFFSET);
+       memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
+       memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
        memblock_reserve(__amode31_base, __eamode31 - __samode31);
        memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
        memblock_reserve(__pa(_stext), _end - _stext);
index 577f1ea..2296b1f 100644 (file)
@@ -4667,6 +4667,8 @@ static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
                return -EINVAL;
        if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
                return -E2BIG;
+       if (!kvm_s390_pv_cpu_is_protected(vcpu))
+               return -EINVAL;
 
        switch (mop->op) {
        case KVM_S390_MEMOP_SIDA_READ:
index d056baa..9894009 100644 (file)
@@ -5,9 +5,6 @@
 
 #include "test_modules.h"
 
-#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
-REPEAT_10000(DECLARE_RETURN);
-
 /*
  * Test that modules with many relocations are loaded properly.
  */
index 43b5e4b..6371fcf 100644 (file)
@@ -47,4 +47,7 @@
        __REPEAT_10000_1(f, 8); \
        __REPEAT_10000_1(f, 9)
 
+#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
+REPEAT_10000(DECLARE_RETURN);
+
 #endif
index 84b8753..bab883c 100644 (file)
@@ -22,7 +22,7 @@
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 
-#define _BUG_FLAGS(ins, flags)                                         \
+#define _BUG_FLAGS(ins, flags, extra)                                  \
 do {                                                                   \
        asm_inline volatile("1:\t" ins "\n"                             \
                     ".pushsection __bug_table,\"aw\"\n"                \
@@ -31,7 +31,8 @@ do {                                                                  \
                     "\t.word %c1"        "\t# bug_entry::line\n"       \
                     "\t.word %c2"        "\t# bug_entry::flags\n"      \
                     "\t.org 2b+%c3\n"                                  \
-                    ".popsection"                                      \
+                    ".popsection\n"                                    \
+                    extra                                              \
                     : : "i" (__FILE__), "i" (__LINE__),                \
                         "i" (flags),                                   \
                         "i" (sizeof(struct bug_entry)));               \
@@ -39,14 +40,15 @@ do {                                                                        \
 
 #else /* !CONFIG_DEBUG_BUGVERBOSE */
 
-#define _BUG_FLAGS(ins, flags)                                         \
+#define _BUG_FLAGS(ins, flags, extra)                                  \
 do {                                                                   \
        asm_inline volatile("1:\t" ins "\n"                             \
                     ".pushsection __bug_table,\"aw\"\n"                \
                     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"   \
                     "\t.word %c0"        "\t# bug_entry::flags\n"      \
                     "\t.org 2b+%c1\n"                                  \
-                    ".popsection"                                      \
+                    ".popsection\n"                                    \
+                    extra                                              \
                     : : "i" (flags),                                   \
                         "i" (sizeof(struct bug_entry)));               \
 } while (0)
@@ -55,7 +57,7 @@ do {                                                                  \
 
 #else
 
-#define _BUG_FLAGS(ins, flags)  asm volatile(ins)
+#define _BUG_FLAGS(ins, flags, extra)  asm volatile(ins)
 
 #endif /* CONFIG_GENERIC_BUG */
 
@@ -63,8 +65,8 @@ do {                                                                  \
 #define BUG()                                                  \
 do {                                                           \
        instrumentation_begin();                                \
-       _BUG_FLAGS(ASM_UD2, 0);                                 \
-       unreachable();                                          \
+       _BUG_FLAGS(ASM_UD2, 0, "");                             \
+       __builtin_unreachable();                                \
 } while (0)
 
 /*
@@ -75,9 +77,9 @@ do {                                                          \
  */
 #define __WARN_FLAGS(flags)                                    \
 do {                                                           \
+       __auto_type f = BUGFLAG_WARNING|(flags);                \
        instrumentation_begin();                                \
-       _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags));           \
-       annotate_reachable();                                   \
+       _BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE);                  \
        instrumentation_end();                                  \
 } while (0)
 
index 6db4e29..65d1479 100644 (file)
 /* FREE!                                ( 7*32+10) */
 #define X86_FEATURE_PTI                        ( 7*32+11) /* Kernel Page Table Isolation enabled */
 #define X86_FEATURE_RETPOLINE          ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_AMD      ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE   ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2             ( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL      ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
index 6dcccb3..ec9830d 100644 (file)
@@ -703,7 +703,6 @@ struct kvm_vcpu_arch {
        struct fpu_guest guest_fpu;
 
        u64 xcr0;
-       u64 guest_supported_xcr0;
 
        struct kvm_pio_request pio;
        void *pio_data;
index 3faf0f9..a4a39c3 100644 (file)
 #define MSR_AMD64_ICIBSEXTDCTL         0xc001103c
 #define MSR_AMD64_IBSOPDATA4           0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX    8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SVM_AVIC_DOORBELL    0xc001011b
 #define MSR_AMD64_VM_PAGE_FLUSH                0xc001011e
 #define MSR_AMD64_SEV_ES_GHCB          0xc0010130
 #define MSR_AMD64_SEV                  0xc0010131
index cc74dc5..acbaeaf 100644 (file)
@@ -84,7 +84,7 @@
 #ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
                      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
-                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
+                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
 #else
        jmp     *%\reg
 #endif
@@ -94,7 +94,7 @@
 #ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
                      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
-                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD
+                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
 #else
        call    *%\reg
 #endif
@@ -146,7 +146,7 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
        "lfence;\n"                                             \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
-       X86_FEATURE_RETPOLINE_AMD)
+       X86_FEATURE_RETPOLINE_LFENCE)
 
 # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
 
@@ -176,7 +176,7 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
        "lfence;\n"                                             \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
-       X86_FEATURE_RETPOLINE_AMD)
+       X86_FEATURE_RETPOLINE_LFENCE)
 
 # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
 #endif
@@ -188,9 +188,11 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
 /* The Spectre V2 mitigation variants */
 enum spectre_v2_mitigation {
        SPECTRE_V2_NONE,
-       SPECTRE_V2_RETPOLINE_GENERIC,
-       SPECTRE_V2_RETPOLINE_AMD,
-       SPECTRE_V2_IBRS_ENHANCED,
+       SPECTRE_V2_RETPOLINE,
+       SPECTRE_V2_LFENCE,
+       SPECTRE_V2_EIBRS,
+       SPECTRE_V2_EIBRS_RETPOLINE,
+       SPECTRE_V2_EIBRS_LFENCE,
 };
 
 /* The indirect branch speculation control variants */
index b00dbc5..bb2fb78 100644 (file)
@@ -220,6 +220,42 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define SVM_NESTED_CTL_SEV_ENABLE      BIT(1)
 #define SVM_NESTED_CTL_SEV_ES_ENABLE   BIT(2)
 
+
+/* AVIC */
+#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                        31
+#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK               (1 << 31)
+
+#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK   (0xFFULL)
+#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK       (0xFFFFFFFFFFULL << 12)
+#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK         (1ULL << 62)
+#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK              (1ULL << 63)
+#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK               (0xFF)
+
+#define AVIC_DOORBELL_PHYSICAL_ID_MASK                 (0xFF)
+
+#define AVIC_UNACCEL_ACCESS_WRITE_MASK         1
+#define AVIC_UNACCEL_ACCESS_OFFSET_MASK                0xFF0
+#define AVIC_UNACCEL_ACCESS_VECTOR_MASK                0xFFFFFFFF
+
+enum avic_ipi_failure_cause {
+       AVIC_IPI_FAILURE_INVALID_INT_TYPE,
+       AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
+       AVIC_IPI_FAILURE_INVALID_TARGET,
+       AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+};
+
+
+/*
+ * 0xff is broadcast, so the max index allowed for physical APIC ID
+ * table is 0xfe.  APIC IDs above 0xff are reserved.
+ */
+#define AVIC_MAX_PHYSICAL_ID_COUNT     0xff
+
+#define AVIC_HPA_MASK  ~((0xFFFULL << 52) | 0xFFF)
+#define VMCB_AVIC_APIC_BAR_MASK                0xFFFFFFFFFF000ULL
+
+
 struct vmcb_seg {
        u16 selector;
        u16 attrib;
index a963010..78e667a 100644 (file)
 /* Memory mapped from other domains has valid IOMMU entries */
 #define XEN_HVM_CPUID_IOMMU_MAPPINGS   (1u << 2)
 #define XEN_HVM_CPUID_VCPU_ID_PRESENT  (1u << 3) /* vcpu id is present in EBX */
+#define XEN_HVM_CPUID_DOMID_PRESENT    (1u << 4) /* domid is present in ECX */
+/*
+ * Bits 55:49 from the IO-APIC RTE and bits 11:5 from the MSI address can be
+ * used to store high bits for the Destination ID. This expands the Destination
+ * ID field from 8 to 15 bits, allowing APIC IDs up to 32768 to be targeted.
+ */
+#define XEN_HVM_CPUID_EXT_DEST_ID      (1u << 5)
 
 /*
  * Leaf 6 (0x40000x05)
index 5007c3f..b4470ea 100644 (file)
@@ -389,7 +389,7 @@ static int emit_indirect(int op, int reg, u8 *bytes)
  *
  *   CALL *%\reg
  *
- * It also tries to inline spectre_v2=retpoline,amd when size permits.
+ * It also tries to inline spectre_v2=retpoline,lfence when size permits.
  */
 static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
 {
@@ -407,7 +407,7 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
        BUG_ON(reg == 4);
 
        if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
-           !cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD))
+           !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE))
                return -1;
 
        op = insn->opcode.bytes[0];
@@ -438,9 +438,9 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
        }
 
        /*
-        * For RETPOLINE_AMD: prepend the indirect CALL/JMP with an LFENCE.
+        * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
         */
-       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD)) {
+       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
                bytes[i++] = 0x0f;
                bytes[i++] = 0xae;
                bytes[i++] = 0xe8; /* LFENCE */
index 1c1f218..6296e1e 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/prctl.h>
 #include <linux/sched/smt.h>
 #include <linux/pgtable.h>
+#include <linux/bpf.h>
 
 #include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
@@ -650,6 +651,32 @@ static inline const char *spectre_v2_module_string(void)
 static inline const char *spectre_v2_module_string(void) { return ""; }
 #endif
 
+#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
+#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+
+#ifdef CONFIG_BPF_SYSCALL
+void unpriv_ebpf_notify(int new_state)
+{
+       if (new_state)
+               return;
+
+       /* Unprivileged eBPF is enabled */
+
+       switch (spectre_v2_enabled) {
+       case SPECTRE_V2_EIBRS:
+               pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+               break;
+       case SPECTRE_V2_EIBRS_LFENCE:
+               if (sched_smt_active())
+                       pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+               break;
+       default:
+               break;
+       }
+}
+#endif
+
 static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
        int len = strlen(opt);
@@ -664,7 +691,10 @@ enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-       SPECTRE_V2_CMD_RETPOLINE_AMD,
+       SPECTRE_V2_CMD_RETPOLINE_LFENCE,
+       SPECTRE_V2_CMD_EIBRS,
+       SPECTRE_V2_CMD_EIBRS_RETPOLINE,
+       SPECTRE_V2_CMD_EIBRS_LFENCE,
 };
 
 enum spectre_v2_user_cmd {
@@ -737,6 +767,13 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
        return SPECTRE_V2_USER_CMD_AUTO;
 }
 
+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+{
+       return (mode == SPECTRE_V2_EIBRS ||
+               mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+               mode == SPECTRE_V2_EIBRS_LFENCE);
+}
+
 static void __init
 spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 {
@@ -804,7 +841,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
         */
        if (!boot_cpu_has(X86_FEATURE_STIBP) ||
            !smt_possible ||
-           spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+           spectre_v2_in_eibrs_mode(spectre_v2_enabled))
                return;
 
        /*
@@ -824,9 +861,11 @@ set_mode:
 
 static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
-       [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
-       [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
-       [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
+       [SPECTRE_V2_RETPOLINE]                  = "Mitigation: Retpolines",
+       [SPECTRE_V2_LFENCE]                     = "Mitigation: LFENCE",
+       [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced IBRS",
+       [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced IBRS + LFENCE",
+       [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced IBRS + Retpolines",
 };
 
 static const struct {
@@ -837,8 +876,12 @@ static const struct {
        { "off",                SPECTRE_V2_CMD_NONE,              false },
        { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
-       { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
+       { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
+       { "retpoline,lfence",   SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
        { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+       { "eibrs",              SPECTRE_V2_CMD_EIBRS,             false },
+       { "eibrs,lfence",       SPECTRE_V2_CMD_EIBRS_LFENCE,      false },
+       { "eibrs,retpoline",    SPECTRE_V2_CMD_EIBRS_RETPOLINE,   false },
        { "auto",               SPECTRE_V2_CMD_AUTO,              false },
 };
 
@@ -875,10 +918,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
        }
 
        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
-            cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
-            cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
+            cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+            cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
-               pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
+               pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+
+       if ((cmd == SPECTRE_V2_CMD_EIBRS ||
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+           !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+               pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+
+       if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+            cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
+           !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+               pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
+                      mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }
 
@@ -887,6 +950,16 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
        return cmd;
 }
 
+static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+{
+       if (!IS_ENABLED(CONFIG_RETPOLINE)) {
+               pr_err("Kernel not compiled with retpoline; no mitigation available!");
+               return SPECTRE_V2_NONE;
+       }
+
+       return SPECTRE_V2_RETPOLINE;
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -907,49 +980,64 @@ static void __init spectre_v2_select_mitigation(void)
        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
-                       mode = SPECTRE_V2_IBRS_ENHANCED;
-                       /* Force it so VMEXIT will restore correctly */
-                       x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-                       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-                       goto specv2_set_mode;
+                       mode = SPECTRE_V2_EIBRS;
+                       break;
                }
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_auto;
+
+               mode = spectre_v2_select_retpoline();
                break;
-       case SPECTRE_V2_CMD_RETPOLINE_AMD:
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_amd;
+
+       case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
+               pr_err(SPECTRE_V2_LFENCE_MSG);
+               mode = SPECTRE_V2_LFENCE;
                break;
+
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_generic;
+               mode = SPECTRE_V2_RETPOLINE;
                break;
+
        case SPECTRE_V2_CMD_RETPOLINE:
-               if (IS_ENABLED(CONFIG_RETPOLINE))
-                       goto retpoline_auto;
+               mode = spectre_v2_select_retpoline();
+               break;
+
+       case SPECTRE_V2_CMD_EIBRS:
+               mode = SPECTRE_V2_EIBRS;
+               break;
+
+       case SPECTRE_V2_CMD_EIBRS_LFENCE:
+               mode = SPECTRE_V2_EIBRS_LFENCE;
+               break;
+
+       case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
+               mode = SPECTRE_V2_EIBRS_RETPOLINE;
                break;
        }
-       pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
-       return;
 
-retpoline_auto:
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-           boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
-       retpoline_amd:
-               if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-                       pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
-                       goto retpoline_generic;
-               }
-               mode = SPECTRE_V2_RETPOLINE_AMD;
-               setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
-               setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
-       } else {
-       retpoline_generic:
-               mode = SPECTRE_V2_RETPOLINE_GENERIC;
+       if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+               pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+
+       if (spectre_v2_in_eibrs_mode(mode)) {
+               /* Force it so VMEXIT will restore correctly */
+               x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+       }
+
+       switch (mode) {
+       case SPECTRE_V2_NONE:
+       case SPECTRE_V2_EIBRS:
+               break;
+
+       case SPECTRE_V2_LFENCE:
+       case SPECTRE_V2_EIBRS_LFENCE:
+               setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
+               fallthrough;
+
+       case SPECTRE_V2_RETPOLINE:
+       case SPECTRE_V2_EIBRS_RETPOLINE:
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+               break;
        }
 
-specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);
 
@@ -975,7 +1063,7 @@ specv2_set_mode:
         * the CPU supports Enhanced IBRS, kernel might un-intentionally not
         * enable IBRS around firmware calls.
         */
-       if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
+       if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
@@ -1045,6 +1133,10 @@ void cpu_bugs_smt_update(void)
 {
        mutex_lock(&spec_ctrl_mutex);
 
+       if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+               pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+
        switch (spectre_v2_user_stibp) {
        case SPECTRE_V2_USER_NONE:
                break;
@@ -1684,7 +1776,7 @@ static ssize_t tsx_async_abort_show_state(char *buf)
 
 static char *stibp_state(void)
 {
-       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+       if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
                return "";
 
        switch (spectre_v2_user_stibp) {
@@ -1714,6 +1806,27 @@ static char *ibpb_state(void)
        return "";
 }
 
+static ssize_t spectre_v2_show_state(char *buf)
+{
+       if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+               return sprintf(buf, "Vulnerable: LFENCE\n");
+
+       if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+               return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
+
+       if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+               return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+
+       return sprintf(buf, "%s%s%s%s%s%s\n",
+                      spectre_v2_strings[spectre_v2_enabled],
+                      ibpb_state(),
+                      boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+                      stibp_state(),
+                      boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+                      spectre_v2_module_string());
+}
+
 static ssize_t srbds_show_state(char *buf)
 {
        return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
@@ -1739,12 +1852,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
        case X86_BUG_SPECTRE_V2:
-               return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-                              ibpb_state(),
-                              boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-                              stibp_state(),
-                              boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
-                              spectre_v2_module_string());
+               return spectre_v2_show_state(buf);
 
        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
index 001808e..48afe96 100644 (file)
@@ -410,6 +410,8 @@ void sgx_encl_release(struct kref *ref)
                }
 
                kfree(entry);
+               /* Invoke scheduler to prevent soft lockups. */
+               cond_resched();
        }
 
        xa_destroy(&encl->page_array);
index 4b41efc..8e4bc64 100644 (file)
@@ -344,10 +344,8 @@ static void sgx_reclaim_pages(void)
 {
        struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
        struct sgx_backing backing[SGX_NR_TO_SCAN];
-       struct sgx_epc_section *section;
        struct sgx_encl_page *encl_page;
        struct sgx_epc_page *epc_page;
-       struct sgx_numa_node *node;
        pgoff_t page_index;
        int cnt = 0;
        int ret;
@@ -418,13 +416,7 @@ skip:
                kref_put(&encl_page->encl->refcount, sgx_encl_release);
                epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
 
-               section = &sgx_epc_sections[epc_page->section];
-               node = section->node;
-
-               spin_lock(&node->lock);
-               list_add_tail(&epc_page->list, &node->free_page_list);
-               spin_unlock(&node->lock);
-               atomic_long_inc(&sgx_nr_free_pages);
+               sgx_free_epc_page(epc_page);
        }
 }
 
index 437d7c9..75ffaef 100644 (file)
@@ -91,11 +91,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
                const void *kbuf, const void __user *ubuf)
 {
        struct fpu *fpu = &target->thread.fpu;
-       struct user32_fxsr_struct newstate;
+       struct fxregs_state newstate;
        int ret;
 
-       BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state));
-
        if (!cpu_feature_enabled(X86_FEATURE_FXSR))
                return -ENODEV;
 
@@ -116,9 +114,10 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
        /* Copy the state  */
        memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate));
 
-       /* Clear xmm8..15 */
+       /* Clear xmm8..15 for 32-bit callers */
        BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16);
-       memset(&fpu->fpstate->regs.fxsave.xmm_space[8], 0, 8 * 16);
+       if (in_ia32_syscall())
+               memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16);
 
        /* Mark FP and SSE as in use when XSAVE is enabled */
        if (use_xsave())
index 02b3dda..7c7824a 100644 (file)
@@ -1558,7 +1558,10 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
                fpregs_restore_userregs();
 
        newfps->xfeatures = curfps->xfeatures | xfeatures;
-       newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+
+       if (!guest_fpu)
+               newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+
        newfps->xfd = curfps->xfd & ~xfeatures;
 
        /* Do the final updates within the locked region */
index a438217..d77481e 100644 (file)
@@ -462,19 +462,24 @@ static bool pv_tlb_flush_supported(void)
 {
        return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
                !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-               kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+               kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+               !boot_cpu_has(X86_FEATURE_MWAIT) &&
+               (num_possible_cpus() != 1));
 }
 
 static bool pv_ipi_supported(void)
 {
-       return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+       return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) &&
+              (num_possible_cpus() != 1));
 }
 
 static bool pv_sched_yield_supported(void)
 {
        return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
                !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-           kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+           kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+           !boot_cpu_has(X86_FEATURE_MWAIT) &&
+           (num_possible_cpus() != 1));
 }
 
 #define KVM_IPI_CLUSTER_SIZE   (2 * BITS_PER_LONG)
@@ -619,7 +624,7 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 
        /* Make sure other vCPUs get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
-               if (vcpu_is_preempted(cpu)) {
+               if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
                        kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
                        break;
                }
index a35cbf9..c5caa73 100644 (file)
@@ -239,6 +239,9 @@ static void __init kvmclock_init_mem(void)
 
 static int __init kvm_setup_vsyscall_timeinfo(void)
 {
+       if (!kvm_para_available() || !kvmclock)
+               return 0;
+
        kvmclock_init_mem();
 
 #ifdef CONFIG_X86_64
index 6d2244c..8d2f2f9 100644 (file)
@@ -1224,7 +1224,7 @@ static struct user_regset x86_64_regsets[] __ro_after_init = {
        },
        [REGSET_FP] = {
                .core_note_type = NT_PRFPREG,
-               .n = sizeof(struct user_i387_struct) / sizeof(long),
+               .n = sizeof(struct fxregs_state) / sizeof(long),
                .size = sizeof(long), .align = sizeof(long),
                .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
        },
@@ -1271,7 +1271,7 @@ static struct user_regset x86_32_regsets[] __ro_after_init = {
        },
        [REGSET_XFP] = {
                .core_note_type = NT_PRXFPREG,
-               .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
+               .n = sizeof(struct fxregs_state) / sizeof(u32),
                .size = sizeof(u32), .align = sizeof(u32),
                .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
        },
index 9ae64f9..9b9fb78 100644 (file)
@@ -1,5 +1,4 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/dmi.h>
 #include <linux/ioport.h>
 #include <asm/e820/api.h>
 
@@ -24,31 +23,11 @@ static void resource_clip(struct resource *res, resource_size_t start,
                res->start = end + 1;
 }
 
-/*
- * Some BIOS-es contain a bug where they add addresses which map to
- * system RAM in the PCI host bridge window returned by the ACPI _CRS
- * method, see commit 4dc2287c1805 ("x86: avoid E820 regions when
- * allocating address space"). To avoid this Linux by default excludes
- * E820 reservations when allocating addresses since 2010.
- * In 2019 some systems have shown-up with E820 reservations which cover
- * the entire _CRS returned PCI host bridge window, causing all attempts
- * to assign memory to PCI BARs to fail if Linux uses E820 reservations.
- *
- * Ideally Linux would fully stop using E820 reservations, but then
- * the old systems this was added for will regress.
- * Instead keep the old behavior for old systems, while ignoring the
- * E820 reservations for any systems from now on.
- */
 static void remove_e820_regions(struct resource *avail)
 {
-       int i, year = dmi_get_bios_year();
+       int i;
        struct e820_entry *entry;
 
-       if (year >= 2018)
-               return;
-
-       pr_info_once("PCI: Removing E820 reservations from host bridge windows\n");
-
        for (i = 0; i < e820_table->nr_entries; i++) {
                entry = &e820_table->entries[i];
 
index 494d4d3..b8f8d26 100644 (file)
@@ -282,6 +282,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct kvm_cpuid_entry2 *best;
+       u64 guest_supported_xcr0;
 
        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (best && apic) {
@@ -293,9 +294,11 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                kvm_apic_set_version(vcpu);
        }
 
-       vcpu->arch.guest_supported_xcr0 =
+       guest_supported_xcr0 =
                cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
 
+       vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;
+
        kvm_update_pv_runtime(vcpu);
 
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
index d7e6fde..9322e63 100644 (file)
@@ -2306,7 +2306,12 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
                apic->irr_pending = true;
                apic->isr_count = 1;
        } else {
-               apic->irr_pending = (apic_search_irr(apic) != -1);
+               /*
+                * Don't clear irr_pending, searching the IRR can race with
+                * updates from the CPU as APICv is still active from hardware's
+                * perspective.  The flag will be cleared as appropriate when
+                * KVM injects the interrupt.
+                */
                apic->isr_count = count_vectors(apic->regs + APIC_ISR);
        }
 }
index 593093b..5628d0b 100644 (file)
@@ -3565,7 +3565,7 @@ set_root_pgd:
 out_unlock:
        write_unlock(&vcpu->kvm->mmu_lock);
 
-       return 0;
+       return r;
 }
 
 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
@@ -3889,12 +3889,23 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
        walk_shadow_page_lockless_end(vcpu);
 }
 
+static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
+{
+       /* make sure the token value is not 0 */
+       u32 id = vcpu->arch.apf.id;
+
+       if (id << 12 == 0)
+               vcpu->arch.apf.id = 1;
+
+       return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
+}
+
 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                                    gfn_t gfn)
 {
        struct kvm_arch_async_pf arch;
 
-       arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
+       arch.token = alloc_apf_token(vcpu);
        arch.gfn = gfn;
        arch.direct_map = vcpu->arch.mmu->direct_map;
        arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
index f614f95..b1a0299 100644 (file)
@@ -95,7 +95,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
 }
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
-                                 unsigned config, bool exclude_user,
+                                 u64 config, bool exclude_user,
                                  bool exclude_kernel, bool intr,
                                  bool in_tx, bool in_tx_cp)
 {
@@ -181,7 +181,8 @@ static int cmp_u64(const void *a, const void *b)
 
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
-       unsigned config, type = PERF_TYPE_RAW;
+       u64 config;
+       u32 type = PERF_TYPE_RAW;
        struct kvm *kvm = pmc->vcpu->kvm;
        struct kvm_pmu_event_filter *filter;
        bool allow_event = true;
@@ -220,7 +221,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        }
 
        if (type == PERF_TYPE_RAW)
-               config = eventsel & X86_RAW_EVENT_MASK;
+               config = eventsel & AMD64_RAW_EVENT_MASK;
 
        if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
                return;
index 90364d0..fb3e207 100644 (file)
 #include "irq.h"
 #include "svm.h"
 
-#define SVM_AVIC_DOORBELL      0xc001011b
-
-#define AVIC_HPA_MASK  ~((0xFFFULL << 52) | 0xFFF)
-
-/*
- * 0xff is broadcast, so the max index allowed for physical APIC ID
- * table is 0xfe.  APIC IDs above 0xff are reserved.
- */
-#define AVIC_MAX_PHYSICAL_ID_COUNT     255
-
-#define AVIC_UNACCEL_ACCESS_WRITE_MASK         1
-#define AVIC_UNACCEL_ACCESS_OFFSET_MASK                0xFF0
-#define AVIC_UNACCEL_ACCESS_VECTOR_MASK                0xFFFFFFFF
-
 /* AVIC GATAG is encoded using VM and VCPU IDs */
 #define AVIC_VCPU_ID_BITS              8
 #define AVIC_VCPU_ID_MASK              ((1 << AVIC_VCPU_ID_BITS) - 1)
@@ -73,12 +59,6 @@ struct amd_svm_iommu_ir {
        void *data;             /* Storing pointer to struct amd_ir_data */
 };
 
-enum avic_ipi_failure_cause {
-       AVIC_IPI_FAILURE_INVALID_INT_TYPE,
-       AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
-       AVIC_IPI_FAILURE_INVALID_TARGET,
-       AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
-};
 
 /* Note:
  * This function is called from IOMMU driver to notify
@@ -289,6 +269,22 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+void avic_ring_doorbell(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Note, the vCPU could get migrated to a different pCPU at any point,
+        * which could result in signalling the wrong/previous pCPU.  But if
+        * that happens the vCPU is guaranteed to do a VMRUN (after being
+        * migrated) and thus will process pending interrupts, i.e. a doorbell
+        * is not needed (and the spurious one is harmless).
+        */
+       int cpu = READ_ONCE(vcpu->cpu);
+
+       if (cpu != get_cpu())
+               wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
+       put_cpu();
+}
+
 static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
                                   u32 icrl, u32 icrh)
 {
@@ -304,8 +300,13 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
                                        GET_APIC_DEST_FIELD(icrh),
-                                       icrl & APIC_DEST_MASK))
-                       kvm_vcpu_wake_up(vcpu);
+                                       icrl & APIC_DEST_MASK)) {
+                       vcpu->arch.apic->irr_pending = true;
+                       svm_complete_interrupt_delivery(vcpu,
+                                                       icrl & APIC_MODE_MASK,
+                                                       icrl & APIC_INT_LEVELTRIG,
+                                                       icrl & APIC_VECTOR_MASK);
+               }
        }
 }
 
@@ -345,8 +346,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
                avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
                break;
        case AVIC_IPI_FAILURE_INVALID_TARGET:
-               WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
-                         index, vcpu->vcpu_id, icrh, icrl);
                break;
        case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
                WARN_ONCE(1, "Invalid backing page\n");
@@ -669,52 +668,6 @@ void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
        return;
 }
 
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
-{
-       if (!vcpu->arch.apicv_active)
-               return -1;
-
-       kvm_lapic_set_irr(vec, vcpu->arch.apic);
-
-       /*
-        * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
-        * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
-        * the read of guest_mode, which guarantees that either VMRUN will see
-        * and process the new vIRR entry, or that the below code will signal
-        * the doorbell if the vCPU is already running in the guest.
-        */
-       smp_mb__after_atomic();
-
-       /*
-        * Signal the doorbell to tell hardware to inject the IRQ if the vCPU
-        * is in the guest.  If the vCPU is not in the guest, hardware will
-        * automatically process AVIC interrupts at VMRUN.
-        */
-       if (vcpu->mode == IN_GUEST_MODE) {
-               int cpu = READ_ONCE(vcpu->cpu);
-
-               /*
-                * Note, the vCPU could get migrated to a different pCPU at any
-                * point, which could result in signalling the wrong/previous
-                * pCPU.  But if that happens the vCPU is guaranteed to do a
-                * VMRUN (after being migrated) and thus will process pending
-                * interrupts, i.e. a doorbell is not needed (and the spurious
-                * one is harmless).
-                */
-               if (cpu != get_cpu())
-                       wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
-               put_cpu();
-       } else {
-               /*
-                * Wake the vCPU if it was blocking.  KVM will then detect the
-                * pending IRQ when checking if the vCPU has a wake event.
-                */
-               kvm_vcpu_wake_up(vcpu);
-       }
-
-       return 0;
-}
-
 bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
 {
        return false;
index 1218b5a..39d280e 100644 (file)
@@ -1457,18 +1457,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
            !__nested_vmcb_check_save(vcpu, &save_cached))
                goto out_free;
 
-       /*
-        * While the nested guest CR3 is already checked and set by
-        * KVM_SET_SREGS, it was set when nested state was yet loaded,
-        * thus MMU might not be initialized correctly.
-        * Set it again to fix this.
-        */
-
-       ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
-                                 nested_npt_enabled(svm), false);
-       if (WARN_ON_ONCE(ret))
-               goto out_free;
-
 
        /*
         * All checks done, we can enter guest mode. Userspace provides
@@ -1494,6 +1482,20 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 
        svm_switch_vmcb(svm, &svm->nested.vmcb02);
        nested_vmcb02_prepare_control(svm);
+
+       /*
+        * While the nested guest CR3 is already checked and set by
+        * KVM_SET_SREGS, it was set when nested state was yet loaded,
+        * thus MMU might not be initialized correctly.
+        * Set it again to fix this.
+        */
+
+       ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
+                                 nested_npt_enabled(svm), false);
+       if (WARN_ON_ONCE(ret))
+               goto out_free;
+
+
        kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
        ret = 0;
 out_free:
index a290efb..fd3a00c 100644 (file)
@@ -1585,6 +1585,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 hcr0 = cr0;
+       bool old_paging = is_paging(vcpu);
 
 #ifdef CONFIG_X86_64
        if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
@@ -1601,8 +1602,11 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 #endif
        vcpu->arch.cr0 = cr0;
 
-       if (!npt_enabled)
+       if (!npt_enabled) {
                hcr0 |= X86_CR0_PG | X86_CR0_WP;
+               if (old_paging != is_paging(vcpu))
+                       svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
+       }
 
        /*
         * re-enable caching here because the QEMU bios
@@ -1646,8 +1650,12 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                svm_flush_tlb(vcpu);
 
        vcpu->arch.cr4 = cr4;
-       if (!npt_enabled)
+       if (!npt_enabled) {
                cr4 |= X86_CR4_PAE;
+
+               if (!is_paging(vcpu))
+                       cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
+       }
        cr4 |= host_cr4_mce;
        to_svm(vcpu)->vmcb->save.cr4 = cr4;
        vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
@@ -2685,8 +2693,23 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        u64 data = msr->data;
        switch (ecx) {
        case MSR_AMD64_TSC_RATIO:
-               if (!msr->host_initiated && !svm->tsc_scaling_enabled)
-                       return 1;
+
+               if (!svm->tsc_scaling_enabled) {
+
+                       if (!msr->host_initiated)
+                               return 1;
+                       /*
+                        * In case TSC scaling is not enabled, always
+                        * leave this MSR at the default value.
+                        *
+                        * Due to bug in qemu 6.2.0, it would try to set
+                        * this msr to 0 if tsc scaling is not enabled.
+                        * Ignore this value as well.
+                        */
+                       if (data != 0 && data != svm->tsc_ratio_msr)
+                               return 1;
+                       break;
+               }
 
                if (data & TSC_RATIO_RSVD)
                        return 1;
@@ -3291,21 +3314,55 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
                SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
 }
 
-static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
-                                 int trig_mode, int vector)
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+                                    int trig_mode, int vector)
 {
-       struct kvm_vcpu *vcpu = apic->vcpu;
+       /*
+        * vcpu->arch.apicv_active must be read after vcpu->mode.
+        * Pairs with smp_store_release in vcpu_enter_guest.
+        */
+       bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
 
-       if (svm_deliver_avic_intr(vcpu, vector)) {
-               kvm_lapic_set_irr(vector, apic);
+       if (!READ_ONCE(vcpu->arch.apicv_active)) {
+               /* Process the interrupt via inject_pending_event */
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
+               return;
+       }
+
+       trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
+       if (in_guest_mode) {
+               /*
+                * Signal the doorbell to tell hardware to inject the IRQ.  If
+                * the vCPU exits the guest before the doorbell chimes, hardware
+                * will automatically process AVIC interrupts at the next VMRUN.
+                */
+               avic_ring_doorbell(vcpu);
        } else {
-               trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
-                                          trig_mode, vector);
+               /*
+                * Wake the vCPU if it was blocking.  KVM will then detect the
+                * pending IRQ when checking if the vCPU has a wake event.
+                */
+               kvm_vcpu_wake_up(vcpu);
        }
 }
 
+static void svm_deliver_interrupt(struct kvm_lapic *apic,  int delivery_mode,
+                                 int trig_mode, int vector)
+{
+       kvm_lapic_set_irr(vector, apic);
+
+       /*
+        * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
+        * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
+        * the read of guest_mode.  This guarantees that either VMRUN will see
+        * and process the new vIRR entry, or that svm_complete_interrupt_delivery
+        * will signal the doorbell if the CPU has already entered the guest.
+        */
+       smp_mb__after_atomic();
+       svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
+}
+
 static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -3353,11 +3410,13 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
        if (svm->nested.nested_run_pending)
                return -EBUSY;
 
+       if (svm_nmi_blocked(vcpu))
+               return 0;
+
        /* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
        if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
                return -EBUSY;
-
-       return !svm_nmi_blocked(vcpu);
+       return 1;
 }
 
 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -3409,9 +3468,13 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+
        if (svm->nested.nested_run_pending)
                return -EBUSY;
 
+       if (svm_interrupt_blocked(vcpu))
+               return 0;
+
        /*
         * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
         * e.g. if the IRQ arrived asynchronously after checking nested events.
@@ -3419,7 +3482,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
        if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
                return -EBUSY;
 
-       return !svm_interrupt_blocked(vcpu);
+       return 1;
 }
 
 static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
@@ -4150,11 +4213,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
        if (svm->nested.nested_run_pending)
                return -EBUSY;
 
+       if (svm_smi_blocked(vcpu))
+               return 0;
+
        /* An SMI must not be injected into L2 if it's supposed to VM-Exit.  */
        if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
                return -EBUSY;
 
-       return !svm_smi_blocked(vcpu);
+       return 1;
 }
 
 static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
@@ -4248,11 +4314,18 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
         * Enter the nested guest now
         */
 
+       vmcb_mark_all_dirty(svm->vmcb01.ptr);
+
        vmcb12 = map.hva;
        nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
        nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
        ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
 
+       if (ret)
+               goto unmap_save;
+
+       svm->nested.nested_run_pending = 1;
+
 unmap_save:
        kvm_vcpu_unmap(vcpu, &map_save, true);
 unmap_map:
@@ -4637,6 +4710,7 @@ static __init void svm_set_cpu_caps(void)
        /* CPUID 0x80000001 and 0x8000000A (SVM features) */
        if (nested) {
                kvm_cpu_cap_set(X86_FEATURE_SVM);
+               kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
 
                if (nrips)
                        kvm_cpu_cap_set(X86_FEATURE_NRIPS);
index 7352535..fa98d68 100644 (file)
@@ -489,6 +489,8 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
 void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                          int read, int write);
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+                                    int trig_mode, int vec);
 
 /* nested.c */
 
@@ -556,17 +558,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
 
 /* avic.c */
 
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
-#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                        31
-#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK               (1 << 31)
-
-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK   (0xFFULL)
-#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK       (0xFFFFFFFFFFULL << 12)
-#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK         (1ULL << 62)
-#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK              (1ULL << 63)
-
-#define VMCB_AVIC_APIC_BAR_MASK                0xFFFFFFFFFF000ULL
-
 int avic_ga_log_notifier(u32 ga_tag);
 void avic_vm_destroy(struct kvm *kvm);
 int avic_vm_init(struct kvm *kvm);
@@ -583,12 +574,12 @@ bool svm_check_apicv_inhibit_reasons(ulong bit);
 void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
 void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
 bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
 int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                       uint32_t guest_irq, bool set);
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void avic_ring_doorbell(struct kvm_vcpu *vcpu);
 
 /* sev.c */
 
index ba34e94..dc822a1 100644 (file)
@@ -246,8 +246,7 @@ static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
        src = &prev->host_state;
        dest = &vmx->loaded_vmcs->host_state;
 
-       vmx_set_vmcs_host_state(dest, src->cr3, src->fs_sel, src->gs_sel,
-                               src->fs_base, src->gs_base);
+       vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
        dest->ldt_sel = src->ldt_sel;
 #ifdef CONFIG_X86_64
        dest->ds_sel = src->ds_sel;
@@ -3056,7 +3055,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       unsigned long cr4;
+       unsigned long cr3, cr4;
        bool vm_fail;
 
        if (!nested_early_check)
@@ -3079,6 +3078,12 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
         */
        vmcs_writel(GUEST_RFLAGS, 0);
 
+       cr3 = __get_current_cr3_fast();
+       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+               vmcs_writel(HOST_CR3, cr3);
+               vmx->loaded_vmcs->host_state.cr3 = cr3;
+       }
+
        cr4 = cr4_read_shadow();
        if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
                vmcs_writel(HOST_CR4, cr4);
index 6c27bd0..b730d79 100644 (file)
@@ -1080,14 +1080,9 @@ static void pt_guest_exit(struct vcpu_vmx *vmx)
                wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
 }
 
-void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3,
-                            u16 fs_sel, u16 gs_sel,
-                            unsigned long fs_base, unsigned long gs_base)
+void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
+                       unsigned long fs_base, unsigned long gs_base)
 {
-       if (unlikely(cr3 != host->cr3)) {
-               vmcs_writel(HOST_CR3, cr3);
-               host->cr3 = cr3;
-       }
        if (unlikely(fs_sel != host->fs_sel)) {
                if (!(fs_sel & 7))
                        vmcs_write16(HOST_FS_SELECTOR, fs_sel);
@@ -1182,9 +1177,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
        gs_base = segment_base(gs_sel);
 #endif
 
-       vmx_set_vmcs_host_state(host_state, __get_current_cr3_fast(),
-                               fs_sel, gs_sel, fs_base, gs_base);
-
+       vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
        vmx->guest_state_loaded = true;
 }
 
@@ -6791,7 +6784,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       unsigned long cr4;
+       unsigned long cr3, cr4;
 
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!enable_vnmi &&
@@ -6834,6 +6827,19 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
        vcpu->arch.regs_dirty = 0;
 
+       /*
+        * Refresh vmcs.HOST_CR3 if necessary.  This must be done immediately
+        * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
+        * it switches back to the current->mm, which can occur in KVM context
+        * when switching to a temporary mm to patch kernel code, e.g. if KVM
+        * toggles a static key while handling a VM-Exit.
+        */
+       cr3 = __get_current_cr3_fast();
+       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+               vmcs_writel(HOST_CR3, cr3);
+               vmx->loaded_vmcs->host_state.cr3 = cr3;
+       }
+
        cr4 = cr4_read_shadow();
        if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
                vmcs_writel(HOST_CR4, cr4);
@@ -7659,6 +7665,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
                if (ret)
                        return ret;
 
+               vmx->nested.nested_run_pending = 1;
                vmx->nested.smm.guest_mode = false;
        }
        return 0;
index 7f2c82e..9c6bfcd 100644 (file)
@@ -374,9 +374,8 @@ int allocate_vpid(void);
 void free_vpid(int vpid);
 void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
-void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3,
-                            u16 fs_sel, u16 gs_sel,
-                            unsigned long fs_base, unsigned long gs_base);
+void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
+                       unsigned long fs_base, unsigned long gs_base);
 int vmx_get_cpl(struct kvm_vcpu *vcpu);
 bool vmx_emulation_required(struct kvm_vcpu *vcpu);
 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
index 7131d73..eb40296 100644 (file)
@@ -984,6 +984,18 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
 
+static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
+}
+
+#ifdef CONFIG_X86_64
+static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
+{
+       return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
+}
+#endif
+
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
        u64 xcr0 = xcr;
@@ -1003,7 +1015,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
         * saving.  However, xcr0 bit 0 is always set, even if the
         * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
         */
-       valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
+       valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
        if (xcr0 & ~valid_bits)
                return 1;
 
@@ -2351,10 +2363,12 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
        return tsc;
 }
 
+#ifdef CONFIG_X86_64
 static inline int gtod_is_based_on_tsc(int mode)
 {
        return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
 }
+#endif
 
 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 {
@@ -3706,8 +3720,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                    !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
                        return 1;
 
-               if (data & ~(XFEATURE_MASK_USER_DYNAMIC &
-                            vcpu->arch.guest_supported_xcr0))
+               if (data & ~kvm_guest_supported_xfd(vcpu))
                        return 1;
 
                fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
@@ -3717,8 +3730,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                    !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
                        return 1;
 
-               if (data & ~(XFEATURE_MASK_USER_DYNAMIC &
-                            vcpu->arch.guest_supported_xcr0))
+               if (data & ~kvm_guest_supported_xfd(vcpu))
                        return 1;
 
                vcpu->arch.guest_fpu.xfd_err = data;
@@ -4233,6 +4245,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
        case KVM_CAP_VCPU_ATTRIBUTES:
        case KVM_CAP_SYS_ATTRIBUTES:
+       case KVM_CAP_ENABLE_CAP:
                r = 1;
                break;
        case KVM_CAP_EXIT_HYPERCALL:
@@ -8942,6 +8955,13 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
        if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
                return -KVM_EOPNOTSUPP;
 
+       /*
+        * When the TSC is in permanent catchup mode guests won't be able to use
+        * the pvclock_read_retry loop to get a consistent view of pvclock
+        */
+       if (vcpu->arch.tsc_always_catchup)
+               return -KVM_EOPNOTSUPP;
+
        if (!kvm_get_walltime_and_clockread(&ts, &cycle))
                return -KVM_EOPNOTSUPP;
 
@@ -9160,6 +9180,7 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
                likely(!pic_in_kernel(vcpu->kvm));
 }
 
+/* Called within kvm->srcu read side.  */
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *kvm_run = vcpu->run;
@@ -9168,16 +9189,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
        kvm_run->cr8 = kvm_get_cr8(vcpu);
        kvm_run->apic_base = kvm_get_apic_base(vcpu);
 
-       /*
-        * The call to kvm_ready_for_interrupt_injection() may end up in
-        * kvm_xen_has_interrupt() which may require the srcu lock to be
-        * held, to protect against changes in the vcpu_info address.
-        */
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        kvm_run->ready_for_interrupt_injection =
                pic_in_kernel(vcpu->kvm) ||
                kvm_vcpu_ready_for_interrupt_injection(vcpu);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
        if (is_smm(vcpu))
                kvm_run->flags |= KVM_RUN_X86_SMM;
@@ -9795,6 +9809,7 @@ void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
 
 /*
+ * Called within kvm->srcu read side.
  * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace.  Otherwise, the value will be returned to the
  * userspace.
@@ -9983,7 +9998,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         * result in virtual interrupt delivery.
         */
        local_irq_disable();
-       vcpu->mode = IN_GUEST_MODE;
+
+       /* Store vcpu->apicv_active before vcpu->mode.  */
+       smp_store_release(&vcpu->mode, IN_GUEST_MODE);
 
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
@@ -10171,6 +10188,7 @@ out:
        return r;
 }
 
+/* Called within kvm->srcu read side.  */
 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 {
        bool hv_timer;
@@ -10230,12 +10248,12 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
                !vcpu->arch.apf.halted);
 }
 
+/* Called within kvm->srcu read side.  */
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
        struct kvm *kvm = vcpu->kvm;
 
-       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
        vcpu->arch.l1tf_flush_l1d = true;
 
        for (;;) {
@@ -10263,14 +10281,12 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                if (__xfer_to_guest_mode_work_pending()) {
                        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                        r = xfer_to_guest_mode_handle_work(vcpu);
+                       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
                        if (r)
                                return r;
-                       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
                }
        }
 
-       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-
        return r;
 }
 
@@ -10376,6 +10392,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *kvm_run = vcpu->run;
+       struct kvm *kvm = vcpu->kvm;
        int r;
 
        vcpu_load(vcpu);
@@ -10383,6 +10400,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        kvm_run->flags = 0;
        kvm_load_guest_fpu(vcpu);
 
+       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
                        r = -EINTR;
@@ -10393,7 +10411,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 * use before KVM has ever run the vCPU.
                 */
                WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
+
+               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                kvm_vcpu_block(vcpu);
+               vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
                if (kvm_apic_accept_events(vcpu) < 0) {
                        r = 0;
                        goto out;
@@ -10453,8 +10475,9 @@ out:
        if (kvm_run->kvm_valid_regs)
                store_regs(vcpu);
        post_kvm_run_save(vcpu);
-       kvm_sigset_deactivate(vcpu);
+       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 
+       kvm_sigset_deactivate(vcpu);
        vcpu_put(vcpu);
        return r;
 }
index bad5753..74be1fd 100644 (file)
@@ -133,32 +133,57 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
 void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 {
        struct kvm_vcpu_xen *vx = &v->arch.xen;
+       struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
+       struct kvm_memslots *slots = kvm_memslots(v->kvm);
+       bool atomic = (state == RUNSTATE_runnable);
        uint64_t state_entry_time;
-       unsigned int offset;
+       int __user *user_state;
+       uint64_t __user *user_times;
 
        kvm_xen_update_runstate(v, state);
 
        if (!vx->runstate_set)
                return;
 
-       BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+       if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
+           kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
+               return;
+
+       /* We made sure it fits in a single page */
+       BUG_ON(!ghc->memslot);
+
+       if (atomic)
+               pagefault_disable();
 
-       offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
-#ifdef CONFIG_X86_64
        /*
-        * The only difference is alignment of uint64_t in 32-bit.
-        * So the first field 'state' is accessed directly using
-        * offsetof() (where its offset happens to be zero), while the
-        * remaining fields which are all uint64_t, start at 'offset'
-        * which we tweak here by adding 4.
+        * The only difference between 32-bit and 64-bit versions of the
+        * runstate struct is the alignment of uint64_t in 32-bit, which
+        * means that the 64-bit version has an additional 4 bytes of
+        * padding after the first field 'state'.
+        *
+        * So we use 'int __user *user_state' to point to the state field,
+        * and 'uint64_t __user *user_times' for runstate_entry_time. So
+        * the actual array of time[] in each state starts at user_times[1].
         */
+       BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
+       BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
+       user_state = (int __user *)ghc->hva;
+
+       BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+
+       user_times = (uint64_t __user *)(ghc->hva +
+                                        offsetof(struct compat_vcpu_runstate_info,
+                                                 state_entry_time));
+#ifdef CONFIG_X86_64
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
                     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
                     offsetof(struct compat_vcpu_runstate_info, time) + 4);
 
        if (v->kvm->arch.xen.long_mode)
-               offset = offsetof(struct vcpu_runstate_info, state_entry_time);
+               user_times = (uint64_t __user *)(ghc->hva +
+                                                offsetof(struct vcpu_runstate_info,
+                                                         state_entry_time));
 #endif
        /*
         * First write the updated state_entry_time at the appropriate
@@ -172,10 +197,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
        BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
                     sizeof(state_entry_time));
 
-       if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
-                                         &state_entry_time, offset,
-                                         sizeof(state_entry_time)))
-               return;
+       if (__put_user(state_entry_time, user_times))
+               goto out;
        smp_wmb();
 
        /*
@@ -189,11 +212,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
        BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
                     sizeof(vx->current_runstate));
 
-       if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
-                                         &vx->current_runstate,
-                                         offsetof(struct vcpu_runstate_info, state),
-                                         sizeof(vx->current_runstate)))
-               return;
+       if (__put_user(vx->current_runstate, user_state))
+               goto out;
 
        /*
         * Write the actual runstate times immediately after the
@@ -208,24 +228,23 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
        BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
                     sizeof(vx->runstate_times));
 
-       if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
-                                         &vx->runstate_times[0],
-                                         offset + sizeof(u64),
-                                         sizeof(vx->runstate_times)))
-               return;
-
+       if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
+               goto out;
        smp_wmb();
 
        /*
         * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
         * runstate_entry_time field.
         */
-
        state_entry_time &= ~XEN_RUNSTATE_UPDATE;
-       if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
-                                         &state_entry_time, offset,
-                                         sizeof(state_entry_time)))
-               return;
+       __put_user(state_entry_time, user_times);
+       smp_wmb();
+
+ out:
+       mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+       if (atomic)
+               pagefault_enable();
 }
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
@@ -443,6 +462,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                        break;
                }
 
+               /* It must fit within a single page */
+               if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
+                       r = -EINVAL;
+                       break;
+               }
+
                r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
                                              &vcpu->arch.xen.vcpu_info_cache,
                                              data->u.gpa,
@@ -460,6 +485,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                        break;
                }
 
+               /* It must fit within a single page */
+               if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
+                       r = -EINVAL;
+                       break;
+               }
+
                r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
                                              &vcpu->arch.xen.vcpu_time_info_cache,
                                              data->u.gpa,
@@ -481,6 +512,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                        break;
                }
 
+               /* It must fit within a single page */
+               if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
+                       r = -EINVAL;
+                       break;
+               }
+
                r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
                                              &vcpu->arch.xen.runstate_cache,
                                              data->u.gpa,
index 89b3fb2..afbdda5 100644 (file)
@@ -34,7 +34,7 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
                      __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
-                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_AMD
+                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE
 
 .endm
 
index 2b1e266..0ecb140 100644 (file)
@@ -394,7 +394,7 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
        u8 *prog = *pprog;
 
 #ifdef CONFIG_RETPOLINE
-       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD)) {
+       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
                EMIT_LFENCE();
                EMIT2(0xFF, 0xE0 + reg);
        } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
index 6448c50..517a9d8 100644 (file)
@@ -185,8 +185,7 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
 
        if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
                xen_teardown_timer(cpu);
-
-       return 0;
+       return 0;
 }
 
 static bool no_vector_callback __initdata;
@@ -248,6 +247,11 @@ static __init bool xen_x2apic_available(void)
        return x2apic_supported();
 }
 
+static bool __init msi_ext_dest_id(void)
+{
+       return cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_EXT_DEST_ID;
+}
+
 static __init void xen_hvm_guest_late_init(void)
 {
 #ifdef CONFIG_XEN_PVH
@@ -310,6 +314,7 @@ struct hypervisor_x86 x86_hyper_xen_hvm __initdata = {
        .init.x2apic_available  = xen_x2apic_available,
        .init.init_mem_mapping  = xen_hvm_init_mem_mapping,
        .init.guest_late_init   = xen_hvm_guest_late_init,
+       .init.msi_ext_dest_id   = msi_ext_dest_id,
        .runtime.pin_vcpu       = xen_pin_vcpu,
        .ignore_nopv            = true,
 };
index 31b1e34..14ea32e 100644 (file)
@@ -57,6 +57,14 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
                screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
                screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
 
+               if (size >= offsetof(struct dom0_vga_console_info,
+                                    u.vesa_lfb.ext_lfb_base)
+                   + sizeof(info->u.vesa_lfb.ext_lfb_base)
+                   && info->u.vesa_lfb.ext_lfb_base) {
+                       screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
+                       screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+               }
+
                if (info->video_type == XEN_VGATYPE_EFI_LFB) {
                        screen_info->orig_video_isVGA = VIDEO_TYPE_EFI;
                        break;
@@ -66,14 +74,6 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
                                     u.vesa_lfb.mode_attrs)
                    + sizeof(info->u.vesa_lfb.mode_attrs))
                        screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
-
-               if (size >= offsetof(struct dom0_vga_console_info,
-                                    u.vesa_lfb.ext_lfb_base)
-                   + sizeof(info->u.vesa_lfb.ext_lfb_base)
-                   && info->u.vesa_lfb.ext_lfb_base) {
-                       screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
-                       screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
-               }
                break;
        }
 }
index 0c612a9..36a66e9 100644 (file)
@@ -7018,6 +7018,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
        spin_unlock_irq(&bfqd->lock);
 #endif
 
+       wbt_enable_default(bfqd->queue);
+
        kfree(bfqd);
 }
 
index d93e3bb..1039515 100644 (file)
@@ -284,13 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
        wake_up_all(&q->mq_freeze_wq);
 }
 
-void blk_set_queue_dying(struct request_queue *q)
-{
-       blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-       blk_queue_start_drain(q);
-}
-EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -308,7 +301,8 @@ void blk_cleanup_queue(struct request_queue *q)
        WARN_ON_ONCE(blk_queue_registered(q));
 
        /* mark @q DYING, no new request or merges will be allowed afterwards */
-       blk_set_queue_dying(q);
+       blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+       blk_queue_start_drain(q);
 
        blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
index 4526add..c7f71d8 100644 (file)
@@ -446,7 +446,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
                if (bytes > len)
                        bytes = len;
 
-               page = alloc_page(GFP_NOIO | gfp_mask);
+               page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
                if (!page)
                        goto cleanup;
 
index 1adfe48..d69ca91 100644 (file)
@@ -736,6 +736,10 @@ static void blk_complete_request(struct request *req)
 
                /* Completion has already been traced */
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
+
+               if (req_op(req) == REQ_OP_ZONE_APPEND)
+                       bio->bi_iter.bi_sector = req->__sector;
+
                if (!is_flush)
                        bio_endio(bio);
                bio = next;
index ec98aed..482df2a 100644 (file)
@@ -525,8 +525,6 @@ void elv_unregister_queue(struct request_queue *q)
                kobject_del(&e->kobj);
 
                e->registered = 0;
-               /* Re-enable throttling in case elevator disabled it */
-               wbt_enable_default(q);
        }
 }
 
index 4f59e0f..a18e7fb 100644 (file)
@@ -289,6 +289,8 @@ static void blkdev_bio_end_io_async(struct bio *bio)
        struct kiocb *iocb = dio->iocb;
        ssize_t ret;
 
+       WRITE_ONCE(iocb->private, NULL);
+
        if (likely(!bio->bi_status)) {
                ret = dio->size;
                iocb->ki_pos += ret;
index 626c840..9eca1f7 100644 (file)
@@ -548,6 +548,20 @@ out_free_ext_minor:
 }
 EXPORT_SYMBOL(device_add_disk);
 
+/**
+ * blk_mark_disk_dead - mark a disk as dead
+ * @disk: disk to mark as dead
+ *
+ * Mark a disk as dead (e.g. surprise removed) and don't accept any new I/O
+ * to this disk.
+ */
+void blk_mark_disk_dead(struct gendisk *disk)
+{
+       set_bit(GD_DEAD, &disk->state);
+       blk_queue_start_drain(disk->queue);
+}
+EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
+
 /**
  * del_gendisk - remove the gendisk
  * @disk: the struct gendisk to remove
index e1ea185..c8289b7 100644 (file)
@@ -25,12 +25,9 @@ struct alg_type_list {
        struct list_head list;
 };
 
-static atomic_long_t alg_memory_allocated;
-
 static struct proto alg_proto = {
        .name                   = "ALG",
        .owner                  = THIS_MODULE,
-       .memory_allocated       = &alg_memory_allocated,
        .obj_size               = sizeof(struct alg_sock),
 };
 
index a366cb3..76fdaa1 100644 (file)
@@ -1324,3 +1324,4 @@ module_exit(crypto_algapi_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cryptographic algorithms API");
+MODULE_SOFTDEP("pre: cryptomgr");
index cf0869d..7ddfe94 100644 (file)
@@ -643,4 +643,3 @@ EXPORT_SYMBOL_GPL(crypto_req_done);
 
 MODULE_DESCRIPTION("Cryptographic core API");
 MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: cryptomgr");
index 580ec79..78ca498 100644 (file)
@@ -44,6 +44,7 @@ static struct var_t vars[] = {
        { CAPS_START, .u.s = {"[:dv ap 160] " } },
        { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
        { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
+       { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
        { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
        { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
        { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
index 3b23fb7..f2f8f05 100644 (file)
@@ -1361,9 +1361,17 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
        res[0].start = pmcg->page0_base_address;
        res[0].end = pmcg->page0_base_address + SZ_4K - 1;
        res[0].flags = IORESOURCE_MEM;
-       res[1].start = pmcg->page1_base_address;
-       res[1].end = pmcg->page1_base_address + SZ_4K - 1;
-       res[1].flags = IORESOURCE_MEM;
+       /*
+        * The initial version in DEN0049C lacked a way to describe register
+        * page 1, which makes it broken for most PMCG implementations; in
+        * that case, just let the driver fail gracefully if it expects to
+        * find a second memory resource.
+        */
+       if (node->revision > 0) {
+               res[1].start = pmcg->page1_base_address;
+               res[1].end = pmcg->page1_base_address + SZ_4K - 1;
+               res[1].flags = IORESOURCE_MEM;
+       }
 
        if (pmcg->overflow_gsiv)
                acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
index 0077d2c..4671038 100644 (file)
@@ -2065,6 +2065,16 @@ bool acpi_ec_dispatch_gpe(void)
        if (acpi_any_gpe_status_set(first_ec->gpe))
                return true;
 
+       /*
+        * Cancel the SCI wakeup and process all pending events in case there
+        * are any wakeup ones in there.
+        *
+        * Note that if any non-EC GPEs are active at this point, the SCI will
+        * retrigger after the rearming in acpi_s2idle_wake(), so no events
+        * should be missed by canceling the wakeup here.
+        */
+       pm_system_cancel_wakeup();
+
        /*
         * Dispatch the EC GPE in-band, but do not report wakeup in any case
         * to allow the caller to process events properly after that.
index 86560a2..f8e9fa8 100644 (file)
@@ -96,6 +96,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
          DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
          DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
         (void *)1},
+       /* T40 can not handle C3 idle state */
+       { set_max_cstate, "IBM ThinkPad T40", {
+         DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+         DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
+        (void *)2},
        {},
 };
 
index a60ff5d..d4fbea9 100644 (file)
@@ -736,21 +736,15 @@ bool acpi_s2idle_wake(void)
                        return true;
                }
 
-               /* Check non-EC GPE wakeups and dispatch the EC GPE. */
+               /*
+                * Check non-EC GPE wakeups and if there are none, cancel the
+                * SCI-related wakeup and dispatch the EC GPE.
+                */
                if (acpi_ec_dispatch_gpe()) {
                        pm_pr_dbg("ACPI non-EC GPE wakeup\n");
                        return true;
                }
 
-               /*
-                * Cancel the SCI wakeup and process all pending events in case
-                * there are any wakeup ones in there.
-                *
-                * Note that if any non-EC GPEs are active at this point, the
-                * SCI will retrigger after the rearming below, so no events
-                * should be missed by canceling the wakeup here.
-                */
-               pm_system_cancel_wakeup();
                acpi_os_wait_events_complete();
 
                /*
@@ -764,6 +758,7 @@ bool acpi_s2idle_wake(void)
                        return true;
                }
 
+               pm_wakeup_clear(acpi_sci_irq);
                rearm_wake_irq(acpi_sci_irq);
        }
 
index 0741a49..34600b5 100644 (file)
@@ -400,7 +400,7 @@ int __init_or_acpilib acpi_table_parse_entries_array(
 
        acpi_get_table(id, instance, &table_header);
        if (!table_header) {
-               pr_warn("%4.4s not present\n", id);
+               pr_debug("%4.4s not present\n", id);
                return -ENODEV;
        }
 
index abc06e7..ed889f8 100644 (file)
@@ -424,15 +424,11 @@ static int lps0_device_attach(struct acpi_device *adev,
                mem_sleep_current = PM_SUSPEND_TO_IDLE;
 
        /*
-        * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
-        * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
-        * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
-        *
-        * Only enable on !AMD as enabling this universally causes problems for a number
-        * of AMD based systems.
+        * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+        * EC GPE to be enabled while suspended for certain wakeup devices to
+        * work, so mark it as wakeup-capable.
         */
-       if (!acpi_s2idle_vendor_amd())
-               acpi_ec_mark_gpe_for_wake();
+       acpi_ec_mark_gpe_for_wake();
 
        return 0;
 }
index e1b1dd2..0c854ae 100644 (file)
@@ -2448,23 +2448,21 @@ static void ata_dev_config_cpr(struct ata_device *dev)
        struct ata_cpr_log *cpr_log = NULL;
        u8 *desc, *buf = NULL;
 
-       if (!ata_identify_page_supported(dev,
-                                ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+       if (ata_id_major_version(dev->id) < 11 ||
+           !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
                goto out;
 
        /*
-        * Read IDENTIFY DEVICE data log, page 0x47
-        * (concurrent positioning ranges). We can have at most 255 32B range
-        * descriptors plus a 64B header.
+        * Read the concurrent positioning ranges log (0x47). We can have at
+        * most 255 32B range descriptors plus a 64B header.
         */
        buf_len = (64 + 255 * 32 + 511) & ~511;
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                goto out;
 
-       err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
-                                    ATA_LOG_CONCURRENT_POSITIONING_RANGES,
-                                    buf, buf_len >> 9);
+       err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
+                                    0, buf, buf_len >> 9);
        if (err_mask)
                goto out;
 
@@ -4031,6 +4029,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
        /* devices that don't properly handle TRIM commands */
        { "SuperSSpeed S238*",          NULL,   ATA_HORKAGE_NOTRIM, },
+       { "M88V29*",                    NULL,   ATA_HORKAGE_NOTRIM, },
 
        /*
         * As defined, the DRAT (Deterministic Read After Trim) and RZAT
index 7abc7e0..6fa4a2f 100644 (file)
@@ -919,6 +919,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
        irqmask &= ~0x10;
        pci_write_config_byte(dev, 0x5a, irqmask);
 
+       /*
+        * HPT371 chips physically have only one channel, the secondary one,
+        * but the primary channel registers do exist!  Go figure...
+        * So,  we manually disable the non-existing channel here
+        * (if the BIOS hasn't done this already).
+        */
+       if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
+               u8 mcr1;
+
+               pci_read_config_byte(dev, 0x50, &mcr1);
+               mcr1 &= ~0x04;
+               pci_write_config_byte(dev, 0x50, mcr1);
+       }
+
        /*
         * default to pci clock. make sure MA15/16 are set to output
         * to prevent drives having problems with 40-pin cables. Needed
@@ -950,14 +964,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 
        if ((freq >> 12) != 0xABCDE) {
                int i;
-               u8 sr;
+               u16 sr;
                u32 total = 0;
 
                dev_warn(&dev->dev, "BIOS has not set timing clocks\n");
 
                /* This is the process the HPT371 BIOS is reported to use */
                for (i = 0; i < 128; i++) {
-                       pci_read_config_byte(dev, 0x78, &sr);
+                       pci_read_config_word(dev, 0x78, &sr);
                        total += sr & 0x1FF;
                        udelay(15);
                }
index da01521..556034a 100644 (file)
@@ -322,7 +322,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 static ssize_t fsl_sata_intr_coalescing_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       return sysfs_emit(buf, "%d      %d\n",
+       return sysfs_emit(buf, "%u      %u\n",
                        intr_coalescing_count, intr_coalescing_ticks);
 }
 
@@ -332,10 +332,8 @@ static ssize_t fsl_sata_intr_coalescing_store(struct device *dev,
 {
        unsigned int coalescing_count,  coalescing_ticks;
 
-       if (sscanf(buf, "%d%d",
-                               &coalescing_count,
-                               &coalescing_ticks) != 2) {
-               printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+       if (sscanf(buf, "%u%u", &coalescing_count, &coalescing_ticks) != 2) {
+               dev_err(dev, "fsl-sata: wrong parameter format.\n");
                return -EINVAL;
        }
 
@@ -359,7 +357,7 @@ static ssize_t fsl_sata_rx_watermark_show(struct device *dev,
        rx_watermark &= 0x1f;
        spin_unlock_irqrestore(&host->lock, flags);
 
-       return sysfs_emit(buf, "%d\n", rx_watermark);
+       return sysfs_emit(buf, "%u\n", rx_watermark);
 }
 
 static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
@@ -373,8 +371,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
        void __iomem *csr_base = host_priv->csr_base;
        u32 temp;
 
-       if (sscanf(buf, "%d", &rx_watermark) != 1) {
-               printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+       if (kstrtouint(buf, 10, &rx_watermark) < 0) {
+               dev_err(dev, "fsl-sata: wrong parameter format.\n");
                return -EINVAL;
        }
 
@@ -382,8 +380,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
        temp = ioread32(csr_base + TRANSCFG);
        temp &= 0xffffffe0;
        iowrite32(temp | rx_watermark, csr_base + TRANSCFG);
-
        spin_unlock_irqrestore(&host->lock, flags);
+
        return strlen(buf);
 }
 
index 3bc3c31..4f67404 100644 (file)
@@ -1676,6 +1676,8 @@ static int fs_init(struct fs_dev *dev)
        dev->hw_base = pci_resource_start(pci_dev, 0);
 
        dev->base = ioremap(dev->hw_base, 0x1000);
+       if (!dev->base)
+               return 1;
 
        reset_chip (dev);
   
index 38ba086..2578b2d 100644 (file)
@@ -238,7 +238,7 @@ static int lcd2s_redefine_char(struct charlcd *lcd, char *esc)
        if (buf[1] > 7)
                return 1;
 
-       i = 0;
+       i = 2;
        shift = 0;
        value = 0;
        while (*esc && i < LCD2S_CHARACTER_SIZE + 2) {
@@ -298,6 +298,10 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
                        I2C_FUNC_SMBUS_WRITE_BLOCK_DATA))
                return -EIO;
 
+       lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL);
+       if (!lcd2s)
+               return -ENOMEM;
+
        /* Test, if the display is responding */
        err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF);
        if (err < 0)
@@ -307,12 +311,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
        if (!lcd)
                return -ENOMEM;
 
-       lcd2s = kzalloc(sizeof(struct lcd2s_data), GFP_KERNEL);
-       if (!lcd2s) {
-               err = -ENOMEM;
-               goto fail1;
-       }
-
        lcd->drvdata = lcd2s;
        lcd2s->i2c = i2c;
        lcd2s->charlcd = lcd;
@@ -321,26 +319,24 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
        err = device_property_read_u32(&i2c->dev, "display-height-chars",
                        &lcd->height);
        if (err)
-               goto fail2;
+               goto fail1;
 
        err = device_property_read_u32(&i2c->dev, "display-width-chars",
                        &lcd->width);
        if (err)
-               goto fail2;
+               goto fail1;
 
        lcd->ops = &lcd2s_ops;
 
        err = charlcd_register(lcd2s->charlcd);
        if (err)
-               goto fail2;
+               goto fail1;
 
        i2c_set_clientdata(i2c, lcd2s);
        return 0;
 
-fail2:
-       kfree(lcd2s);
 fail1:
-       kfree(lcd);
+       charlcd_free(lcd2s->charlcd);
        return err;
 }
 
@@ -349,7 +345,7 @@ static int lcd2s_i2c_remove(struct i2c_client *i2c)
        struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c);
 
        charlcd_unregister(lcd2s->charlcd);
-       kfree(lcd2s->charlcd);
+       charlcd_free(lcd2s->charlcd);
        return 0;
 }
 
index 9eaaff2..f47cab2 100644 (file)
@@ -629,6 +629,9 @@ re_probe:
                        drv->remove(dev);
 
                devres_release_all(dev);
+               arch_teardown_dma_ops(dev);
+               kfree(dev->dma_range_map);
+               dev->dma_range_map = NULL;
                driver_sysfs_remove(dev);
                dev->driver = NULL;
                dev_set_drvdata(dev, NULL);
@@ -1209,6 +1212,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
 
                devres_release_all(dev);
                arch_teardown_dma_ops(dev);
+               kfree(dev->dma_range_map);
+               dev->dma_range_map = NULL;
                dev->driver = NULL;
                dev_set_drvdata(dev, NULL);
                if (dev->pm_domain && dev->pm_domain->dismiss)
index 99bda0d..8666590 100644 (file)
@@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state;
 bool events_check_enabled __read_mostly;
 
 /* First wakeup IRQ seen by the kernel in the last cycle. */
-unsigned int pm_wakeup_irq __read_mostly;
+static unsigned int wakeup_irq[2] __read_mostly;
+static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
 
 /* If greater than 0 and the system is suspending, terminate the suspend. */
 static atomic_t pm_abort_suspend __read_mostly;
@@ -942,19 +943,45 @@ void pm_system_cancel_wakeup(void)
        atomic_dec_if_positive(&pm_abort_suspend);
 }
 
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(unsigned int irq_number)
 {
-       pm_wakeup_irq = 0;
-       if (reset)
+       raw_spin_lock_irq(&wakeup_irq_lock);
+
+       if (irq_number && wakeup_irq[0] == irq_number)
+               wakeup_irq[0] = wakeup_irq[1];
+       else
+               wakeup_irq[0] = 0;
+
+       wakeup_irq[1] = 0;
+
+       raw_spin_unlock_irq(&wakeup_irq_lock);
+
+       if (!irq_number)
                atomic_set(&pm_abort_suspend, 0);
 }
 
 void pm_system_irq_wakeup(unsigned int irq_number)
 {
-       if (pm_wakeup_irq == 0) {
-               pm_wakeup_irq = irq_number;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
+
+       if (wakeup_irq[0] == 0)
+               wakeup_irq[0] = irq_number;
+       else if (wakeup_irq[1] == 0)
+               wakeup_irq[1] = irq_number;
+       else
+               irq_number = 0;
+
+       raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
+
+       if (irq_number)
                pm_system_wakeup();
-       }
+}
+
+unsigned int pm_wakeup_irq(void)
+{
+       return wakeup_irq[0];
 }
 
 /**
index d265658..4a44625 100644 (file)
@@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
                                ret = regmap_write(map, reg, d->mask_buf[i]);
                        if (d->chip->clear_ack) {
                                if (d->chip->ack_invert && !ret)
-                                       ret = regmap_write(map, reg,
-                                                          d->mask_buf[i]);
+                                       ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
-                                       ret = regmap_write(map, reg,
-                                                          ~d->mask_buf[i]);
+                                       ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
@@ -556,11 +554,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
                                                data->status_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
-                                       ret = regmap_write(map, reg,
-                                                       data->status_buf[i]);
+                                       ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
-                                       ret = regmap_write(map, reg,
-                                                       ~data->status_buf[i]);
+                                       ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0)
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
@@ -817,13 +813,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
                                        d->status_buf[i] & d->mask_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
-                                       ret = regmap_write(map, reg,
-                                               (d->status_buf[i] &
-                                                d->mask_buf[i]));
+                                       ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
-                                       ret = regmap_write(map, reg,
-                                               ~(d->status_buf[i] &
-                                                 d->mask_buf[i]));
+                                       ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
index 01cbbfc..19fe19e 100644 (file)
@@ -79,6 +79,7 @@
 #include <linux/ioprio.h>
 #include <linux/blk-cgroup.h>
 #include <linux/sched/mm.h>
+#include <linux/statfs.h>
 
 #include "loop.h"
 
@@ -774,8 +775,13 @@ static void loop_config_discard(struct loop_device *lo)
                granularity = 0;
 
        } else {
+               struct kstatfs sbuf;
+
                max_discard_sectors = UINT_MAX >> 9;
-               granularity = inode->i_sb->s_blocksize;
+               if (!vfs_statfs(&file->f_path, &sbuf))
+                       granularity = sbuf.f_bsize;
+               else
+                       max_discard_sectors = 0;
        }
 
        if (max_discard_sectors) {
@@ -1082,7 +1088,7 @@ out_putf:
        return error;
 }
 
-static void __loop_clr_fd(struct loop_device *lo)
+static void __loop_clr_fd(struct loop_device *lo, bool release)
 {
        struct file *filp;
        gfp_t gfp = lo->old_gfp_mask;
@@ -1144,6 +1150,8 @@ static void __loop_clr_fd(struct loop_device *lo)
        /* let user-space know about this change */
        kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
        mapping_set_gfp_mask(filp->f_mapping, gfp);
+       /* This is safe: open() is still holding a reference. */
+       module_put(THIS_MODULE);
        blk_mq_unfreeze_queue(lo->lo_queue);
 
        disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
@@ -1151,52 +1159,44 @@ static void __loop_clr_fd(struct loop_device *lo)
        if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
                int err;
 
-               mutex_lock(&lo->lo_disk->open_mutex);
+               /*
+                * open_mutex has been held already in release path, so don't
+                * acquire it if this function is called in such case.
+                *
+                * If the reread partition isn't from release path, lo_refcnt
+                * must be at least one and it can only become zero when the
+                * current holder is released.
+                */
+               if (!release)
+                       mutex_lock(&lo->lo_disk->open_mutex);
                err = bdev_disk_changed(lo->lo_disk, false);
-               mutex_unlock(&lo->lo_disk->open_mutex);
+               if (!release)
+                       mutex_unlock(&lo->lo_disk->open_mutex);
                if (err)
                        pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
                                __func__, lo->lo_number, err);
                /* Device is gone, no point in returning error */
        }
 
+       /*
+        * lo->lo_state is set to Lo_unbound here after above partscan has
+        * finished. There cannot be anybody else entering __loop_clr_fd() as
+        * Lo_rundown state protects us from all the other places trying to
+        * change the 'lo' device.
+        */
        lo->lo_flags = 0;
        if (!part_shift)
                lo->lo_disk->flags |= GENHD_FL_NO_PART;
-
-       fput(filp);
-}
-
-static void loop_rundown_completed(struct loop_device *lo)
-{
        mutex_lock(&lo->lo_mutex);
        lo->lo_state = Lo_unbound;
        mutex_unlock(&lo->lo_mutex);
-       module_put(THIS_MODULE);
-}
-
-static void loop_rundown_workfn(struct work_struct *work)
-{
-       struct loop_device *lo = container_of(work, struct loop_device,
-                                             rundown_work);
-       struct block_device *bdev = lo->lo_device;
-       struct gendisk *disk = lo->lo_disk;
-
-       __loop_clr_fd(lo);
-       kobject_put(&bdev->bd_device.kobj);
-       module_put(disk->fops->owner);
-       loop_rundown_completed(lo);
-}
-
-static void loop_schedule_rundown(struct loop_device *lo)
-{
-       struct block_device *bdev = lo->lo_device;
-       struct gendisk *disk = lo->lo_disk;
 
-       __module_get(disk->fops->owner);
-       kobject_get(&bdev->bd_device.kobj);
-       INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
-       queue_work(system_long_wq, &lo->rundown_work);
+       /*
+        * Need not hold lo_mutex to fput backing file. Calling fput holding
+        * lo_mutex triggers a circular lock dependency possibility warning as
+        * fput can take open_mutex which is usually taken before lo_mutex.
+        */
+       fput(filp);
 }
 
 static int loop_clr_fd(struct loop_device *lo)
@@ -1228,8 +1228,7 @@ static int loop_clr_fd(struct loop_device *lo)
        lo->lo_state = Lo_rundown;
        mutex_unlock(&lo->lo_mutex);
 
-       __loop_clr_fd(lo);
-       loop_rundown_completed(lo);
+       __loop_clr_fd(lo, false);
        return 0;
 }
 
@@ -1754,7 +1753,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
                 * In autoclear mode, stop the loop thread
                 * and remove configuration after last close.
                 */
-               loop_schedule_rundown(lo);
+               __loop_clr_fd(lo, true);
                return;
        } else if (lo->lo_state == Lo_bound) {
                /*
index 918a7a2..082d4b6 100644 (file)
@@ -56,7 +56,6 @@ struct loop_device {
        struct gendisk          *lo_disk;
        struct mutex            lo_mutex;
        bool                    idr_visible;
-       struct work_struct      rundown_work;
 };
 
 struct loop_cmd {
index e6005c2..2b588b6 100644 (file)
@@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
                        "Completion workers still active!\n");
        }
 
-       blk_set_queue_dying(dd->queue);
+       blk_mark_disk_dead(dd->disk);
        set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
 
        /* Clean up the block layer. */
index 4203cda..b844432 100644 (file)
@@ -7185,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
                 * IO to complete/fail.
                 */
                blk_mq_freeze_queue(rbd_dev->disk->queue);
-               blk_set_queue_dying(rbd_dev->disk->queue);
+               blk_mark_disk_dead(rbd_dev->disk);
        }
 
        del_gendisk(rbd_dev->disk);
index c443cd6..8c415be 100644 (file)
@@ -76,9 +76,6 @@ struct virtio_blk {
         */
        refcount_t refs;
 
-       /* What host tells us, plus 2 for header & tailer. */
-       unsigned int sg_elems;
-
        /* Ida index - used to track minor number allocations. */
        int index;
 
@@ -322,8 +319,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_status_t status;
        int err;
 
-       BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
        status = virtblk_setup_cmd(vblk->vdev, req, vbr);
        if (unlikely(status))
                return status;
@@ -783,8 +778,6 @@ static int virtblk_probe(struct virtio_device *vdev)
        /* Prevent integer overflows and honor max vq size */
        sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
 
-       /* We need extra sg elements at head and tail. */
-       sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
@@ -796,7 +789,6 @@ static int virtblk_probe(struct virtio_device *vdev)
        mutex_init(&vblk->vdev_mutex);
 
        vblk->vdev = vdev;
-       vblk->sg_elems = sg_elems;
 
        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
 
@@ -853,7 +845,7 @@ static int virtblk_probe(struct virtio_device *vdev)
                set_disk_ro(vblk->disk, 1);
 
        /* We can handle whatever the host told us to handle. */
-       blk_queue_max_segments(q, vblk->sg_elems-2);
+       blk_queue_max_segments(q, sg_elems);
 
        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);
@@ -925,9 +917,15 @@ static int virtblk_probe(struct virtio_device *vdev)
 
                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
+
+               /*
+                * max_discard_seg == 0 is out of spec but we always
+                * handled it.
+                */
+               if (!v)
+                       v = sg_elems;
                blk_queue_max_discard_segments(q,
-                                              min_not_zero(v,
-                                                           MAX_DISCARD_SEGMENTS));
+                                              min(v, MAX_DISCARD_SEGMENTS));
 
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }
index ccd0dd0..ca71a05 100644 (file)
@@ -2126,7 +2126,7 @@ static void blkfront_closing(struct blkfront_info *info)
 
        /* No more blkif_request(). */
        blk_mq_stop_hw_queues(info->rq);
-       blk_set_queue_dying(info->rq);
+       blk_mark_disk_dead(info->gd);
        set_capacity(info->gd, 0);
 
        for_each_rinfo(info, rinfo, i) {
index 3a258a6..b798958 100644 (file)
@@ -366,6 +366,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
        .config = &modem_foxconn_sdx55_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
        .dma_data_width = 32,
+       .mru_default = 32768,
        .sideband_wake = false,
 };
 
@@ -401,6 +402,7 @@ static const struct mhi_pci_dev_info mhi_mv31_info = {
        .config = &modem_mv31_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
        .dma_data_width = 32,
+       .mru_default = 32768,
 };
 
 static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
index 2359889..e3c4305 100644 (file)
@@ -1957,6 +1957,13 @@ static void virtcons_remove(struct virtio_device *vdev)
        list_del(&portdev->list);
        spin_unlock_irq(&pdrvdata_lock);
 
+       /* Device is going away, exit any polling for buffers */
+       virtio_break_device(vdev);
+       if (use_multiport(portdev))
+               flush_work(&portdev->control_work);
+       else
+               flush_work(&portdev->config_work);
+
        /* Disable interrupts for vqs */
        virtio_reset_device(vdev);
        /* Finish up work that's lined up */
index 744d136..15d6179 100644 (file)
@@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
        },
 
        [JZ4725B_CLK_I2S] = {
-               "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+               "i2s", CGU_CLK_MUX | CGU_CLK_DIV,
                .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 },
                .mux = { CGU_REG_CPCCR, 31, 1 },
                .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 },
-               .gate = { CGU_REG_CLKGR, 6 },
        },
 
        [JZ4725B_CLK_SPI] = {
index 71aa630..f094999 100644 (file)
@@ -108,42 +108,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
        { .hw = &gpll4.clkr.hw },
 };
 
-static struct clk_rcg2 system_noc_clk_src = {
-       .cmd_rcgr = 0x0120,
-       .hid_width = 5,
-       .parent_map = gcc_xo_gpll0_map,
-       .clkr.hw.init = &(struct clk_init_data){
-               .name = "system_noc_clk_src",
-               .parent_data = gcc_xo_gpll0,
-               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
-               .ops = &clk_rcg2_ops,
-       },
-};
-
-static struct clk_rcg2 config_noc_clk_src = {
-       .cmd_rcgr = 0x0150,
-       .hid_width = 5,
-       .parent_map = gcc_xo_gpll0_map,
-       .clkr.hw.init = &(struct clk_init_data){
-               .name = "config_noc_clk_src",
-               .parent_data = gcc_xo_gpll0,
-               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
-               .ops = &clk_rcg2_ops,
-       },
-};
-
-static struct clk_rcg2 periph_noc_clk_src = {
-       .cmd_rcgr = 0x0190,
-       .hid_width = 5,
-       .parent_map = gcc_xo_gpll0_map,
-       .clkr.hw.init = &(struct clk_init_data){
-               .name = "periph_noc_clk_src",
-               .parent_data = gcc_xo_gpll0,
-               .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
-               .ops = &clk_rcg2_ops,
-       },
-};
-
 static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
        F(50000000, P_GPLL0, 12, 0, 0),
        F(100000000, P_GPLL0, 6, 0, 0),
@@ -1150,8 +1114,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
                .enable_mask = BIT(17),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp1_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1435,8 +1397,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
                .enable_mask = BIT(15),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_blsp2_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1764,8 +1724,6 @@ static struct clk_branch gcc_lpass_q6_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_lpass_q6_axi_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1778,8 +1736,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_mss_q6_bimc_axi_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1807,9 +1763,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_0_cfg_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1822,9 +1775,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_0_mstr_axi_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1854,9 +1804,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_0_slv_axi_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1884,9 +1831,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_1_cfg_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1899,9 +1843,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_1_mstr_axi_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1930,9 +1871,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pcie_1_slv_axi_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1960,8 +1898,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_pdm_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -1989,9 +1925,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc1_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2004,9 +1937,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc2_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2034,9 +1964,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc3_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2064,9 +1991,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_sdcc4_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2124,8 +2048,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_tsif_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2153,8 +2075,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2198,8 +2118,6 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_rx_symbol_0_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2213,8 +2131,6 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_rx_symbol_1_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2243,8 +2159,6 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_tx_symbol_0_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2258,8 +2172,6 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_ufs_tx_symbol_1_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2364,8 +2276,6 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb_hs_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2488,8 +2398,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
                .enable_mask = BIT(10),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_boot_rom_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2503,8 +2411,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
                .enable_mask = BIT(13),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_prng_ahb_clk",
-                       .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
-                       .num_parents = 1,
                        .ops = &clk_branch2_ops,
                },
        },
@@ -2547,9 +2453,6 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
        [GPLL0] = &gpll0.clkr,
        [GPLL4_EARLY] = &gpll4_early.clkr,
        [GPLL4] = &gpll4.clkr,
-       [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
-       [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
-       [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
        [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
        [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
        [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
@@ -2696,6 +2599,15 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
        [USB_SS_PHY_LDO] = &usb_ss_phy_ldo.clkr,
        [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
        [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+
+       /*
+        * The following clocks should NOT be managed by this driver, but they once were
+        * mistakengly added. Now they are only here to indicate that they are not defined
+        * on purpose, even though the names will stay in the header file (for ABI sanity).
+        */
+       [CONFIG_NOC_CLK_SRC] = NULL,
+       [PERIPH_NOC_CLK_SRC] = NULL,
+       [SYSTEM_NOC_CLK_SRC] = NULL,
 };
 
 static struct gdsc *gcc_msm8994_gdscs[] = {
index b6f9796..1fccb45 100644 (file)
@@ -241,8 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void)
        bool quirk_unreliable_oscillator = false;
 
        /* Quirk unreliable 32 KiHz oscillator with incomplete dts */
-       if (of_machine_is_compatible("ti,omap3-beagle") ||
-           of_machine_is_compatible("timll,omap3-devkit8000")) {
+       if (of_machine_is_compatible("ti,omap3-beagle-ab4")) {
                quirk_unreliable_oscillator = true;
                counter_32k = -ENODEV;
        }
index b8d9553..80f535c 100644 (file)
@@ -1518,6 +1518,10 @@ static int cpufreq_online(unsigned int cpu)
 
        kobject_uevent(&policy->kobj, KOBJ_ADD);
 
+       /* Callback for handling stuff after policy is ready */
+       if (cpufreq_driver->ready)
+               cpufreq_driver->ready(policy);
+
        if (cpufreq_thermal_control_enabled(cpufreq_driver))
                policy->cdev = of_cpufreq_cooling_register(policy);
 
index 05f3d78..effbb68 100644 (file)
@@ -388,7 +388,7 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
 
        snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
        ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
-                                  IRQF_ONESHOT, data->irq_name, data);
+                                  IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
        if (ret) {
                dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
                return 0;
@@ -542,6 +542,14 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
+static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
+{
+       struct qcom_cpufreq_data *data = policy->driver_data;
+
+       if (data->throttle_irq >= 0)
+               enable_irq(data->throttle_irq);
+}
+
 static struct freq_attr *qcom_cpufreq_hw_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        &cpufreq_freq_attr_scaling_boost_freqs,
@@ -561,6 +569,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
        .fast_switch    = qcom_cpufreq_hw_fast_switch,
        .name           = "qcom-cpufreq-hw",
        .attr           = qcom_cpufreq_hw_attr,
+       .ready          = qcom_cpufreq_ready,
 };
 
 static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
index 4c8ebdf..1b4d425 100644 (file)
@@ -1753,7 +1753,6 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
        char engs_info[2 * OTX2_CPT_NAME_LENGTH];
        struct otx2_cpt_eng_grp_info *grp;
        struct otx2_cpt_engs_rsvd *engs;
-       u32 mask[4];
        int i, j;
 
        pr_debug("Engine groups global info");
@@ -1785,6 +1784,8 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
                for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
                        engs = &grp->engs[j];
                        if (engs->type) {
+                               u32 mask[5] = { };
+
                                get_engs_info(grp, engs_info,
                                              2 * OTX2_CPT_NAME_LENGTH, j);
                                pr_debug("Slot%d: %s", j, engs_info);
index a1da2b4..1476156 100644 (file)
@@ -1681,8 +1681,10 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
                __func__, atchan->irq_status);
 
        if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
-           !(atchan->irq_status & error_mask))
+           !(atchan->irq_status & error_mask)) {
+               spin_unlock_irq(&atchan->lock);
                return;
+       }
 
        if (atchan->irq_status & error_mask)
                at_xdmac_handle_error(atchan);
index 8a6bf29..daafea5 100644 (file)
@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
        if (!cmd_q->qbase) {
                dev_err(dev, "unable to allocate command queue\n");
                ret = -ENOMEM;
-               goto e_dma_alloc;
+               goto e_destroy_pool;
        }
 
        cmd_q->qidx = 0;
@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)
 
        /* Request an irq */
        ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
-       if (ret)
-               goto e_pool;
+       if (ret) {
+               dev_err(dev, "unable to allocate an IRQ\n");
+               goto e_free_dma;
+       }
 
        /* Update the device registers with queue information. */
        cmd_q->qcontrol &= ~CMD_Q_SIZE;
@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
        /* Register the DMA engine support */
        ret = pt_dmaengine_register(pt);
        if (ret)
-               goto e_dmaengine;
+               goto e_free_irq;
 
        /* Set up debugfs entries */
        ptdma_debugfs_setup(pt);
 
        return 0;
 
-e_dmaengine:
+e_free_irq:
        free_irq(pt->pt_irq, pt);
 
-e_dma_alloc:
+e_free_dma:
        dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
 
-e_pool:
-       dev_err(dev, "unable to allocate an IRQ\n");
+e_destroy_pool:
        dma_pool_destroy(pt->cmd_q.dma_pool);
 
        return ret;
index 481f45c..13d12d6 100644 (file)
@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 
        dmac->dev = &pdev->dev;
        platform_set_drvdata(pdev, dmac);
-       dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
-       dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+       ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+       if (ret)
+               return ret;
+
+       ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+       if (ret)
+               return ret;
 
        ret = rcar_dmac_parse_of(&pdev->dev, dmac);
        if (ret < 0)
index 158e5e7..b26ed69 100644 (file)
@@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
                ret = pm_runtime_get(schan->dev);
 
                spin_unlock_irq(&schan->chan_lock);
-               if (ret < 0)
+               if (ret < 0) {
                        dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+                       pm_runtime_put(schan->dev);
+               }
 
                pm_runtime_barrier(schan->dev);
 
index a421643..d5d5573 100644 (file)
@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
        ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
                                     &stm32_dmamux->dmarouter);
        if (ret)
-               goto err_clk;
+               goto pm_disable;
 
        return 0;
 
+pm_disable:
+       pm_runtime_disable(&pdev->dev);
 err_clk:
        clk_disable_unprepare(stm32_dmamux->clk);
 
index 9d9aabd..f5677d8 100644 (file)
@@ -215,7 +215,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
        else
                return (char *)ptr;
 
-       r = (unsigned long)p % align;
+       r = (unsigned long)ptr % align;
 
        if (r == 0)
                return (char *)ptr;
index b406b3f..d76bab3 100644 (file)
@@ -2112,7 +2112,7 @@ static void __exit scmi_driver_exit(void)
 }
 module_exit(scmi_driver_exit);
 
-MODULE_ALIAS("platform: arm-scmi");
+MODULE_ALIAS("platform:arm-scmi");
 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
 MODULE_DESCRIPTION("ARM SCMI protocol driver");
 MODULE_LICENSE("GPL v2");
index 380e4e2..9c46084 100644 (file)
@@ -25,7 +25,7 @@ typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long);
 
 static u32 hartid;
 
-static u32 get_boot_hartid_from_fdt(void)
+static int get_boot_hartid_from_fdt(void)
 {
        const void *fdt;
        int chosen_node, len;
@@ -33,23 +33,26 @@ static u32 get_boot_hartid_from_fdt(void)
 
        fdt = get_efi_config_table(DEVICE_TREE_GUID);
        if (!fdt)
-               return U32_MAX;
+               return -EINVAL;
 
        chosen_node = fdt_path_offset(fdt, "/chosen");
        if (chosen_node < 0)
-               return U32_MAX;
+               return -EINVAL;
 
        prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
        if (!prop || len != sizeof(u32))
-               return U32_MAX;
+               return -EINVAL;
 
-       return fdt32_to_cpu(*prop);
+       hartid = fdt32_to_cpu(*prop);
+       return 0;
 }
 
 efi_status_t check_platform_features(void)
 {
-       hartid = get_boot_hartid_from_fdt();
-       if (hartid == U32_MAX) {
+       int ret;
+
+       ret = get_boot_hartid_from_fdt();
+       if (ret) {
                efi_err("/chosen/boot-hartid missing or invalid!\n");
                return EFI_UNSUPPORTED;
        }
index abdc8a6..cae590b 100644 (file)
@@ -742,6 +742,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
 {
        const struct efivar_operations *ops;
        efi_status_t status;
+       unsigned long varsize;
 
        if (!__efivars)
                return -EINVAL;
@@ -764,15 +765,17 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
                return efivar_entry_set_nonblocking(name, vendor, attributes,
                                                    size, data);
 
+       varsize = size + ucs2_strsize(name, 1024);
        if (!block) {
                if (down_trylock(&efivars_lock))
                        return -EBUSY;
+               status = check_var_size_nonblocking(attributes, varsize);
        } else {
                if (down_interruptible(&efivars_lock))
                        return -EINTR;
+               status = check_var_size(attributes, varsize);
        }
 
-       status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
        if (status != EFI_SUCCESS) {
                up(&efivars_lock);
                return -ENOSPC;
index 869dc95..0cb2664 100644 (file)
@@ -278,7 +278,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
 {
        struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
 
-       return gpiod_get_value(fwd->descs[offset]);
+       return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
+                              : gpiod_get_value(fwd->descs[offset]);
 }
 
 static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
@@ -293,7 +294,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
        for_each_set_bit(i, mask, fwd->chip.ngpio)
                descs[j++] = fwd->descs[i];
 
-       error = gpiod_get_array_value(j, descs, NULL, values);
+       if (fwd->chip.can_sleep)
+               error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
+       else
+               error = gpiod_get_array_value(j, descs, NULL, values);
        if (error)
                return error;
 
@@ -328,7 +332,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
 {
        struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
 
-       gpiod_set_value(fwd->descs[offset], value);
+       if (chip->can_sleep)
+               gpiod_set_value_cansleep(fwd->descs[offset], value);
+       else
+               gpiod_set_value(fwd->descs[offset], value);
 }
 
 static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
@@ -343,7 +350,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
                descs[j++] = fwd->descs[i];
        }
 
-       gpiod_set_array_value(j, descs, NULL, values);
+       if (fwd->chip.can_sleep)
+               gpiod_set_array_value_cansleep(j, descs, NULL, values);
+       else
+               gpiod_set_array_value(j, descs, NULL, values);
 }
 
 static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
index a4c4e45..099e358 100644 (file)
@@ -410,10 +410,8 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
        level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type);
        polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity);
 
-       switch (type) {
-       case IRQ_TYPE_EDGE_BOTH:
+       if (type == IRQ_TYPE_EDGE_BOTH) {
                if (bank->gpio_type == GPIO_TYPE_V2) {
-                       bank->toggle_edge_mode &= ~mask;
                        rockchip_gpio_writel_bit(bank, d->hwirq, 1,
                                                 bank->gpio_regs->int_bothedge);
                        goto out;
@@ -431,30 +429,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
                        else
                                polarity |= mask;
                }
-               break;
-       case IRQ_TYPE_EDGE_RISING:
-               bank->toggle_edge_mode &= ~mask;
-               level |= mask;
-               polarity |= mask;
-               break;
-       case IRQ_TYPE_EDGE_FALLING:
-               bank->toggle_edge_mode &= ~mask;
-               level |= mask;
-               polarity &= ~mask;
-               break;
-       case IRQ_TYPE_LEVEL_HIGH:
-               bank->toggle_edge_mode &= ~mask;
-               level &= ~mask;
-               polarity |= mask;
-               break;
-       case IRQ_TYPE_LEVEL_LOW:
-               bank->toggle_edge_mode &= ~mask;
-               level &= ~mask;
-               polarity &= ~mask;
-               break;
-       default:
-               ret = -EINVAL;
-               goto out;
+       } else {
+               if (bank->gpio_type == GPIO_TYPE_V2) {
+                       rockchip_gpio_writel_bit(bank, d->hwirq, 0,
+                                                bank->gpio_regs->int_bothedge);
+               } else {
+                       bank->toggle_edge_mode &= ~mask;
+               }
+               switch (type) {
+               case IRQ_TYPE_EDGE_RISING:
+                       level |= mask;
+                       polarity |= mask;
+                       break;
+               case IRQ_TYPE_EDGE_FALLING:
+                       level |= mask;
+                       polarity &= ~mask;
+                       break;
+               case IRQ_TYPE_LEVEL_HIGH:
+                       level &= ~mask;
+                       polarity |= mask;
+                       break;
+               case IRQ_TYPE_LEVEL_LOW:
+                       level &= ~mask;
+                       polarity &= ~mask;
+                       break;
+               default:
+                       ret = -EINVAL;
+                       goto out;
+               }
        }
 
        rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type);
index 403f9e8..7d82388 100644 (file)
@@ -223,7 +223,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
                         NULL,
                         chip->base + SIFIVE_GPIO_OUTPUT_EN,
                         chip->base + SIFIVE_GPIO_INPUT_EN,
-                        0);
+                        BGPIOF_READ_OUTPUT_REG_SET);
        if (ret) {
                dev_err(dev, "unable to init generic GPIO\n");
                return ret;
index 04b137e..153fe79 100644 (file)
@@ -570,6 +570,11 @@ static struct gpio_sim_bank *to_gpio_sim_bank(struct config_item *item)
        return container_of(group, struct gpio_sim_bank, group);
 }
 
+static bool gpio_sim_bank_has_label(struct gpio_sim_bank *bank)
+{
+       return bank->label && *bank->label;
+}
+
 static struct gpio_sim_device *
 gpio_sim_bank_get_device(struct gpio_sim_bank *bank)
 {
@@ -770,9 +775,15 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
                         * point the device doesn't exist yet and so dev_name()
                         * is not available.
                         */
-                       hog->chip_label = kasprintf(GFP_KERNEL,
-                                                   "gpio-sim.%u-%s", dev->id,
-                                                   fwnode_get_name(bank->swnode));
+                       if (gpio_sim_bank_has_label(bank))
+                               hog->chip_label = kstrdup(bank->label,
+                                                         GFP_KERNEL);
+                       else
+                               hog->chip_label = kasprintf(GFP_KERNEL,
+                                                       "gpio-sim.%u-%s",
+                                                       dev->id,
+                                                       fwnode_get_name(
+                                                               bank->swnode));
                        if (!hog->chip_label) {
                                gpio_sim_remove_hogs(dev);
                                return -ENOMEM;
@@ -816,7 +827,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
 
        properties[prop_idx++] = PROPERTY_ENTRY_U32("ngpios", bank->num_lines);
 
-       if (bank->label && (strlen(bank->label) > 0))
+       if (gpio_sim_bank_has_label(bank))
                properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label",
                                                               bank->label);
 
index 34b36a8..8d298be 100644 (file)
@@ -343,9 +343,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
        return offset + pin;
 }
 
+#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio)
+
 static void tegra186_irq_ack(struct irq_data *data)
 {
-       struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+       struct tegra_gpio *gpio = to_tegra_gpio(gc);
        void __iomem *base;
 
        base = tegra186_gpio_get_base(gpio, data->hwirq);
@@ -357,7 +360,8 @@ static void tegra186_irq_ack(struct irq_data *data)
 
 static void tegra186_irq_mask(struct irq_data *data)
 {
-       struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+       struct tegra_gpio *gpio = to_tegra_gpio(gc);
        void __iomem *base;
        u32 value;
 
@@ -372,7 +376,8 @@ static void tegra186_irq_mask(struct irq_data *data)
 
 static void tegra186_irq_unmask(struct irq_data *data)
 {
-       struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+       struct tegra_gpio *gpio = to_tegra_gpio(gc);
        void __iomem *base;
        u32 value;
 
@@ -387,7 +392,8 @@ static void tegra186_irq_unmask(struct irq_data *data)
 
 static int tegra186_irq_set_type(struct irq_data *data, unsigned int type)
 {
-       struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+       struct tegra_gpio *gpio = to_tegra_gpio(gc);
        void __iomem *base;
        u32 value;
 
index c7b5446..ffa0256 100644 (file)
@@ -330,7 +330,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
                        goto out_free_lh;
                }
 
-               ret = gpiod_request(desc, lh->label);
+               ret = gpiod_request_user(desc, lh->label);
                if (ret)
                        goto out_free_lh;
                lh->descs[i] = desc;
@@ -1378,7 +1378,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
                        goto out_free_linereq;
                }
 
-               ret = gpiod_request(desc, lr->label);
+               ret = gpiod_request_user(desc, lr->label);
                if (ret)
                        goto out_free_linereq;
 
@@ -1764,7 +1764,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
                }
        }
 
-       ret = gpiod_request(desc, le->label);
+       ret = gpiod_request_user(desc, le->label);
        if (ret)
                goto out_free_le;
        le->desc = desc;
index 4098bc7..44c1ad5 100644 (file)
@@ -475,12 +475,9 @@ static ssize_t export_store(struct class *class,
         * they may be undone on its behalf too.
         */
 
-       status = gpiod_request(desc, "sysfs");
-       if (status) {
-               if (status == -EPROBE_DEFER)
-                       status = -ENODEV;
+       status = gpiod_request_user(desc, "sysfs");
+       if (status)
                goto done;
-       }
 
        status = gpiod_set_transitory(desc, false);
        if (!status) {
index 3859911..a3d1427 100644 (file)
@@ -3147,6 +3147,16 @@ int gpiod_to_irq(const struct gpio_desc *desc)
 
                return retirq;
        }
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+       if (gc->irq.chip) {
+               /*
+                * Avoid race condition with other code, which tries to lookup
+                * an IRQ before the irqchip has been properly registered,
+                * i.e. while gpiochip is still being brought up.
+                */
+               return -EPROBE_DEFER;
+       }
+#endif
        return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(gpiod_to_irq);
index 30bc3f8..c31f462 100644 (file)
@@ -135,6 +135,18 @@ struct gpio_desc {
 
 int gpiod_request(struct gpio_desc *desc, const char *label);
 void gpiod_free(struct gpio_desc *desc);
+
+static inline int gpiod_request_user(struct gpio_desc *desc, const char *label)
+{
+       int ret;
+
+       ret = gpiod_request(desc, label);
+       if (ret == -EPROBE_DEFER)
+               ret = -ENODEV;
+
+       return ret;
+}
+
 int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
                unsigned long lflags, enum gpiod_flags dflags);
 int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
index 82011e7..c4387b3 100644 (file)
@@ -1141,7 +1141,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
        if (ret)
                return ret;
 
-       if (!dev->mode_config.allow_fb_modifiers) {
+       if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) {
                drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
                              "GFX9+ requires FB check based on format modifier\n");
                ret = check_tiling_flags_gfx6(rfb);
index 63a0899..0ead08b 100644 (file)
@@ -2011,6 +2011,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
                        return -ENODEV;
        }
 
+       if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
+               amdgpu_aspm = 0;
+
        if (amdgpu_virtual_display ||
            amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
                supports_atomic = true;
index d99c877..5224d9a 100644 (file)
@@ -391,7 +391,6 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
                                                int index)
 {
        struct drm_plane *plane;
-       uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID};
        int ret;
 
        plane = kzalloc(sizeof(*plane), GFP_KERNEL);
@@ -402,7 +401,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
                                       &amdgpu_vkms_plane_funcs,
                                       amdgpu_vkms_formats,
                                       ARRAY_SIZE(amdgpu_vkms_formats),
-                                      modifiers, type, NULL);
+                                      NULL, type, NULL);
        if (ret) {
                kfree(plane);
                return ERR_PTR(ret);
index b37fc7d..418341a 100644 (file)
@@ -768,11 +768,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * Check if all VM PDs/PTs are ready for updates
  *
  * Returns:
- * True if eviction list is empty.
+ * True if VM is not evicting.
  */
 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 {
-       return list_empty(&vm->evicted);
+       bool ret;
+
+       amdgpu_vm_eviction_lock(vm);
+       ret = !vm->evicting;
+       amdgpu_vm_eviction_unlock(vm);
+
+       return ret && list_empty(&vm->evicted);
 }
 
 /**
index b4eddf6..ff738e9 100644 (file)
@@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
                adev->gfx.config.max_sh_per_se *
                adev->gfx.config.max_shader_engines);
 
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
+       switch (adev->ip_versions[GC_HWIP][0]) {
+       case IP_VERSION(10, 3, 1):
+       case IP_VERSION(10, 3, 3):
                /* Get SA disabled bitmap from eFuse setting */
                efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
                efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
@@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
                disabled_sa = tmp;
 
                WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
+               break;
+       default:
+               break;
        }
 }
 
index e8e4749..f0638db 100644 (file)
@@ -2057,6 +2057,10 @@ static int sdma_v4_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /* SMU saves SDMA state for us */
+       if (adev->in_s0ix)
+               return 0;
+
        return sdma_v4_0_hw_fini(adev);
 }
 
@@ -2064,6 +2068,10 @@ static int sdma_v4_0_resume(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /* SMU restores SDMA state for us */
+       if (adev->in_s0ix)
+               return 0;
+
        return sdma_v4_0_hw_init(adev);
 }
 
index 0fc1747..12f80fd 100644 (file)
@@ -619,8 +619,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 static int soc15_asic_reset(struct amdgpu_device *adev)
 {
        /* original raven doesn't have full asic reset */
-       if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
-           !(adev->apu_flags & AMD_APU_IS_RAVEN2))
+       if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+           (adev->apu_flags & AMD_APU_IS_RAVEN2))
                return 0;
 
        switch (soc15_asic_reset_method(adev)) {
@@ -1114,8 +1114,11 @@ static int soc15_common_early_init(void *handle)
                                AMD_CG_SUPPORT_SDMA_LS |
                                AMD_CG_SUPPORT_VCN_MGCG;
 
+                       /*
+                        * MMHUB PG needs to be disabled for Picasso for
+                        * stability reasons.
+                        */
                        adev->pg_flags = AMD_PG_SUPPORT_SDMA |
-                               AMD_PG_SUPPORT_MMHUB |
                                AMD_PG_SUPPORT_VCN;
                } else {
                        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
index 7f9773f..075429b 100644 (file)
@@ -3653,7 +3653,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 
        /* Use GRPH_PFLIP interrupt */
        for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
-                       i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+                       i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
                if (r) {
@@ -4256,6 +4256,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        }
 #endif
 
+       /* Disable vblank IRQs aggressively for power-saving. */
+       adev_to_drm(adev)->vblank_disable_immediate = true;
+
        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;
@@ -4301,19 +4304,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                                update_connector_ext_caps(aconnector);
                        if (psr_feature_enabled)
                                amdgpu_dm_set_psr_caps(link);
+
+                       /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
+                        * PSR is also supported.
+                        */
+                       if (link->psr_settings.psr_feature_enabled)
+                               adev_to_drm(adev)->vblank_disable_immediate = false;
                }
 
 
        }
 
-       /*
-        * Disable vblank IRQs aggressively for power-saving.
-        *
-        * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
-        * is also supported.
-        */
-       adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
-
        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
index f977f29..10c7be4 100644 (file)
@@ -473,8 +473,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
        clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
 
        /* Refresh bounding box */
+       DC_FP_START();
        clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
                        clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
+       DC_FP_END();
 }
 
 static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
index a1011f3..de3f464 100644 (file)
@@ -120,7 +120,11 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
        result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
 
        if (result == VBIOSSMC_Result_Failed) {
-               ASSERT(0);
+               if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+                   param == TABLE_WATERMARKS)
+                       DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+               else
+                       ASSERT(0);
                REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
                return -1;
        }
index 6f5528d..ba1aa99 100644 (file)
@@ -985,10 +985,13 @@ static bool dc_construct(struct dc *dc,
                goto fail;
 #ifdef CONFIG_DRM_AMD_DC_DCN
        dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
-#endif
 
-       if (dc->res_pool->funcs->update_bw_bounding_box)
+       if (dc->res_pool->funcs->update_bw_bounding_box) {
+               DC_FP_START();
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+               DC_FP_END();
+       }
+#endif
 
        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
@@ -1220,6 +1223,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 
                dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
 
+               dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
                if (dc->res_pool->dmcu != NULL)
                        dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
        }
index b3912ff..18757c1 100644 (file)
@@ -1964,10 +1964,6 @@ enum dc_status dc_remove_stream_from_ctx(
                                dc->res_pool,
                        del_pipe->stream_res.stream_enc,
                        false);
-       /* Release link encoder from stream in new dc_state. */
-       if (dc->res_pool->funcs->link_enc_unassign)
-               dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
-
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(del_pipe)) {
                update_hpo_dp_stream_engine_usage(
index 288e7b0..b518648 100644 (file)
@@ -202,6 +202,7 @@ struct dc_caps {
        bool edp_dsc_support;
        bool vbios_lttpr_aware;
        bool vbios_lttpr_enable;
+       uint32_t max_otg_num;
 };
 
 struct dc_bug_wa {
index 26ec69b..eb2755b 100644 (file)
@@ -1834,9 +1834,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
                                break;
                        }
                }
-               // We are trying to enable eDP, don't power down VDD
-               if (can_apply_edp_fast_boot)
+
+               /*
+                * TO-DO: So far the code logic below only addresses single eDP case.
+                * For dual eDP case, there are a few things that need to be
+                * implemented first:
+                *
+                * 1. Change the fastboot logic above, so eDP link[0 or 1]'s
+                * stream[0 or 1] will all be checked.
+                *
+                * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on
+                * for each eDP.
+                *
+                * Once above 2 things are completed, we can then change the logic below
+                * correspondingly, so dual eDP case will be fully covered.
+                */
+
+               // We are trying to enable eDP, don't power down VDD if eDP stream is existing
+               if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
                        keep_edp_vdd_on = true;
+                       DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
+               } else {
+                       DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
+               }
        }
 
        // Check seamless boot support
index 2bc93df..2a72517 100644 (file)
@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .timing_trace = false,
                .clock_trace = true,
                .disable_pplib_clock_request = true,
-               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+               .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
                .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .vsr_support = true,
index 90c73a1..5e3bcaf 100644 (file)
@@ -138,8 +138,11 @@ static uint32_t convert_and_clamp(
        ret_val = wm_ns * refclk_mhz;
        ret_val /= 1000;
 
-       if (ret_val > clamp_value)
+       if (ret_val > clamp_value) {
+               /* clamping WMs is abnormal, unexpected and may lead to underflow*/
+               ASSERT(0);
                ret_val = clamp_value;
+       }
 
        return ret_val;
 }
@@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
                hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
 
@@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
                hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
        } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
@@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
                hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
 
@@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
                hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
        } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
@@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
                hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
 
@@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
                hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
        } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
@@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
                hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
 
@@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
                hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
        } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
@@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
@@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->a.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.cstate_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
@@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
@@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->a.cstate_pstate.cstate_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.cstate_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
@@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
@@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->b.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.cstate_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
@@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
@@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->b.cstate_pstate.cstate_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.cstate_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
@@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
@@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->c.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.cstate_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
@@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
@@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->c.cstate_pstate.cstate_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.cstate_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
@@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
@@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->d.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.cstate_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
@@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
@@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->d.cstate_pstate.cstate_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.cstate_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
@@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks(
                                watermarks->a.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.pstate_change_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
@@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks(
                                watermarks->b.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.pstate_change_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
@@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks(
                                watermarks->c.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.pstate_change_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
@@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks(
                                watermarks->d.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.pstate_change_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
index e2cae97..48cc009 100644 (file)
@@ -3462,8 +3462,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
             attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
                 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
                 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
-                attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
-                attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+                attr == &sensor_dev_attr_power2_label.dev_attr.attr))
                return 0;
 
        return effective_mode;
index a420729..5488a0e 100644 (file)
@@ -421,6 +421,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu)
        return 0;
 }
 
+static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t *board_reserved;
+       uint16_t *freq_table_gfx;
+       uint32_t i;
+
+       /* Fix some OEM SKU specific stability issues */
+       GET_PPTABLE_MEMBER(BoardReserved, &board_reserved);
+       if ((adev->pdev->device == 0x73DF) &&
+           (adev->pdev->revision == 0XC3) &&
+           (adev->pdev->subsystem_device == 0x16C2) &&
+           (adev->pdev->subsystem_vendor == 0x1043))
+               board_reserved[0] = 1387;
+
+       GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx);
+       if ((adev->pdev->device == 0x73DF) &&
+           (adev->pdev->revision == 0XC3) &&
+           ((adev->pdev->subsystem_device == 0x16C2) ||
+           (adev->pdev->subsystem_device == 0x133C)) &&
+           (adev->pdev->subsystem_vendor == 0x1043)) {
+               for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) {
+                       if (freq_table_gfx[i] > 2500)
+                               freq_table_gfx[i] = 2500;
+               }
+       }
+
+       return 0;
+}
+
 static int sienna_cichlid_setup_pptable(struct smu_context *smu)
 {
        int ret = 0;
@@ -441,7 +471,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu)
        if (ret)
                return ret;
 
-       return ret;
+       return sienna_cichlid_patch_pptable_quirk(smu);
 }
 
 static int sienna_cichlid_tables_init(struct smu_context *smu)
@@ -1238,21 +1268,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
                                &dpm_context->dpm_tables.soc_table;
        struct smu_umd_pstate_table *pstate_table =
                                &smu->pstate_table;
+       struct amdgpu_device *adev = smu->adev;
 
        pstate_table->gfxclk_pstate.min = gfx_table->min;
        pstate_table->gfxclk_pstate.peak = gfx_table->max;
-       if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
-               pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
 
        pstate_table->uclk_pstate.min = mem_table->min;
        pstate_table->uclk_pstate.peak = mem_table->max;
-       if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
-               pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
 
        pstate_table->socclk_pstate.min = soc_table->min;
        pstate_table->socclk_pstate.peak = soc_table->max;
-       if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
+
+       switch (adev->asic_type) {
+       case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
+               pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
+               pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
                pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
+               break;
+       case CHIP_DIMGREY_CAVEFISH:
+               pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
+               pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
+               pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
+               break;
+       case CHIP_BEIGE_GOBY:
+               pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
+               pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
+               pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
+               break;
+       default:
+               break;
+       }
 
        return 0;
 }
index 38cd0ec..42f705c 100644 (file)
@@ -33,6 +33,14 @@ typedef enum {
 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK    960
 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK    1000
 
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676
+
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000
+
 extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
 
 #endif
index caf1775..0bc84b7 100644 (file)
@@ -282,14 +282,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)
 
 static int yellow_carp_mode_reset(struct smu_context *smu, int type)
 {
-       int ret = 0, index = 0;
-
-       index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
-                               SMU_MSG_GfxDeviceDriverReset);
-       if (index < 0)
-               return index == -EACCES ? 0 : index;
+       int ret = 0;
 
-       ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
        if (ret)
                dev_err(smu->adev->dev, "Failed to mode reset!\n");
 
index 58a2428..6e3f1d6 100644 (file)
@@ -6,6 +6,7 @@ config DRM_HDLCD
        depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
        depends on COMMON_CLK
        select DRM_KMS_HELPER
+       select DRM_GEM_CMA_HELPER
        help
          Choose this option if you have an ARM High Definition Colour LCD
          controller.
index a7389a0..af07eeb 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/bitfield.h>
+#include <linux/bits.h>
 #include <linux/clk.h>
 #include <linux/irq.h>
 #include <linux/math64.h>
@@ -196,12 +197,9 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
 /*
  * ui2bc - UI time periods to byte clock cycles
  */
-static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
+static u32 ui2bc(unsigned int ui)
 {
-       u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
-
-       return DIV64_U64_ROUND_UP(ui * dsi->lanes,
-                                 dsi->mode.clock * 1000 * bpp);
+       return DIV_ROUND_UP(ui, BITS_PER_BYTE);
 }
 
 /*
@@ -232,12 +230,12 @@ static int nwl_dsi_config_host(struct nwl_dsi *dsi)
        }
 
        /* values in byte clock cycles */
-       cycles = ui2bc(dsi, cfg->clk_pre);
+       cycles = ui2bc(cfg->clk_pre);
        DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
        nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
        cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
        DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
-       cycles += ui2bc(dsi, cfg->clk_pre);
+       cycles += ui2bc(cfg->clk_pre);
        DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
        nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
        cycles = ps2bc(dsi, cfg->hs_exit);
index dab8f76..68d8415 100644 (file)
@@ -1802,6 +1802,7 @@ static inline void ti_sn_gpio_unregister(void) {}
 
 static void ti_sn65dsi86_runtime_disable(void *data)
 {
+       pm_runtime_dont_use_autosuspend(data);
        pm_runtime_disable(data);
 }
 
@@ -1861,11 +1862,11 @@ static int ti_sn65dsi86_probe(struct i2c_client *client,
                                     "failed to get reference clock\n");
 
        pm_runtime_enable(dev);
+       pm_runtime_set_autosuspend_delay(pdata->dev, 500);
+       pm_runtime_use_autosuspend(pdata->dev);
        ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev);
        if (ret)
                return ret;
-       pm_runtime_set_autosuspend_delay(pdata->dev, 500);
-       pm_runtime_use_autosuspend(pdata->dev);
 
        ti_sn65dsi86_debugfs_init(pdata);
 
index 9781722..54d62fd 100644 (file)
@@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
        state->mode_blob = NULL;
 
        if (mode) {
+               struct drm_property_blob *blob;
+
                drm_mode_convert_to_umode(&umode, mode);
-               state->mode_blob =
-                       drm_property_create_blob(state->crtc->dev,
-                                                sizeof(umode),
-                                                &umode);
-               if (IS_ERR(state->mode_blob))
-                       return PTR_ERR(state->mode_blob);
+               blob = drm_property_create_blob(crtc->dev,
+                                               sizeof(umode), &umode);
+               if (IS_ERR(blob))
+                       return PTR_ERR(blob);
 
                drm_mode_copy(&state->mode, mode);
+
+               state->mode_blob = blob;
                state->enable = true;
                drm_dbg_atomic(crtc->dev,
                               "Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
index a50c82b..76a8c70 100644 (file)
@@ -2330,6 +2330,9 @@ EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal);
 void drm_connector_set_vrr_capable_property(
                struct drm_connector *connector, bool capable)
 {
+       if (!connector->vrr_capable_property)
+               return;
+
        drm_object_property_set_value(&connector->base,
                                      connector->vrr_capable_property,
                                      capable);
index 12893e7..f5f5de3 100644 (file)
@@ -5345,6 +5345,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
        if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
                return quirks;
 
+       info->color_formats |= DRM_COLOR_FORMAT_RGB444;
        drm_parse_cea_ext(connector, edid);
 
        /*
@@ -5393,7 +5394,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
        DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
                          connector->name, info->bpc);
 
-       info->color_formats |= DRM_COLOR_FORMAT_RGB444;
        if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
                info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
        if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
index cefd0cb..dc275c4 100644 (file)
@@ -512,6 +512,7 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *
         */
        vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
        vma->vm_flags &= ~VM_PFNMAP;
+       vma->vm_flags |= VM_DONTEXPAND;
 
        if (cma_obj->map_noncoherent) {
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
index beaf99e..b688841 100644 (file)
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(drm_privacy_screen_get_state);
  *
  * The notifier is called with no locks held. The new hw_state and sw_state
  * can be retrieved using the drm_privacy_screen_get_state() function.
- * A pointer to the drm_privacy_screen's struct is passed as the void *data
+ * A pointer to the drm_privacy_screen's struct is passed as the ``void *data``
  * argument of the notifier_block's notifier_call.
  *
  * The notifier will NOT be called when changes are made through
index 12571ac..c04264f 100644 (file)
@@ -678,7 +678,6 @@ static int decon_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct decon_context *ctx;
        struct device_node *i80_if_timings;
-       struct resource *res;
        int ret;
 
        if (!dev->of_node)
@@ -728,16 +727,11 @@ static int decon_probe(struct platform_device *pdev)
                goto err_iounmap;
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-                                          ctx->i80_if ? "lcd_sys" : "vsync");
-       if (!res) {
-               dev_err(dev, "irq request failed.\n");
-               ret = -ENXIO;
+       ret =  platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
+       if (ret < 0)
                goto err_iounmap;
-       }
 
-       ret = devm_request_irq(dev, res->start, decon_irq_handler,
-                                                       0, "drm_decon", ctx);
+       ret = devm_request_irq(dev, ret, decon_irq_handler, 0, "drm_decon", ctx);
        if (ret) {
                dev_err(dev, "irq request failed.\n");
                goto err_iounmap;
index 32a3657..d13f5e3 100644 (file)
@@ -1334,8 +1334,10 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
        int ret;
        int te_gpio_irq;
 
-       dsi->te_gpio = devm_gpiod_get_optional(dsi->dev, "te", GPIOD_IN);
-       if (IS_ERR(dsi->te_gpio)) {
+       dsi->te_gpio = gpiod_get_optional(panel, "te", GPIOD_IN);
+       if (!dsi->te_gpio) {
+               return 0;
+       } else if (IS_ERR(dsi->te_gpio)) {
                dev_err(dsi->dev, "gpio request failed with %ld\n",
                                PTR_ERR(dsi->te_gpio));
                return PTR_ERR(dsi->te_gpio);
index 023f54e..0ee32e4 100644 (file)
@@ -1267,7 +1267,6 @@ static int fimc_probe(struct platform_device *pdev)
        struct exynos_drm_ipp_formats *formats;
        struct device *dev = &pdev->dev;
        struct fimc_context *ctx;
-       struct resource *res;
        int ret;
        int i, j, num_limits, num_formats;
 
@@ -1330,14 +1329,12 @@ static int fimc_probe(struct platform_device *pdev)
                return PTR_ERR(ctx->regs);
 
        /* resource irq */
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res) {
-               dev_err(dev, "failed to request irq resource.\n");
-               return -ENOENT;
-       }
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0)
+               return ret;
 
-       ret = devm_request_irq(dev, res->start, fimc_irq_handler,
-               0, dev_name(dev), ctx);
+       ret = devm_request_irq(dev, ret, fimc_irq_handler,
+                              0, dev_name(dev), ctx);
        if (ret < 0) {
                dev_err(dev, "failed to request irq.\n");
                return ret;
index c735e53..7d5a483 100644 (file)
@@ -1133,7 +1133,6 @@ static int fimd_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct fimd_context *ctx;
        struct device_node *i80_if_timings;
-       struct resource *res;
        int ret;
 
        if (!dev->of_node)
@@ -1206,15 +1205,11 @@ static int fimd_probe(struct platform_device *pdev)
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-                                          ctx->i80_if ? "lcd_sys" : "vsync");
-       if (!res) {
-               dev_err(dev, "irq request failed.\n");
-               return -ENXIO;
-       }
+       ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
+       if (ret < 0)
+               return ret;
 
-       ret = devm_request_irq(dev, res->start, fimd_irq_handler,
-                                                       0, "drm_fimd", ctx);
+       ret = devm_request_irq(dev, ret, fimd_irq_handler, 0, "drm_fimd", ctx);
        if (ret) {
                dev_err(dev, "irq request failed.\n");
                return ret;
index 166a802..964dceb 100644 (file)
@@ -1220,7 +1220,6 @@ static int gsc_probe(struct platform_device *pdev)
        struct gsc_driverdata *driver_data;
        struct exynos_drm_ipp_formats *formats;
        struct gsc_context *ctx;
-       struct resource *res;
        int num_formats, ret, i, j;
 
        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
@@ -1275,13 +1274,10 @@ static int gsc_probe(struct platform_device *pdev)
                return PTR_ERR(ctx->regs);
 
        /* resource irq */
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res) {
-               dev_err(dev, "failed to request irq resource.\n");
-               return -ENOENT;
-       }
+       ctx->irq = platform_get_irq(pdev, 0);
+       if (ctx->irq < 0)
+               return ctx->irq;
 
-       ctx->irq = res->start;
        ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0,
                               dev_name(dev), ctx);
        if (ret < 0) {
index 41c54f1..e5204be 100644 (file)
@@ -809,19 +809,17 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
                return -ENXIO;
        }
 
-       res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0);
-       if (res == NULL) {
-               dev_err(dev, "get interrupt resource failed.\n");
-               return -ENXIO;
-       }
+       ret = platform_get_irq(mixer_ctx->pdev, 0);
+       if (ret < 0)
+               return ret;
+       mixer_ctx->irq = ret;
 
-       ret = devm_request_irq(dev, res->start, mixer_irq_handler,
-                                               0, "drm_mixer", mixer_ctx);
+       ret = devm_request_irq(dev, mixer_ctx->irq, mixer_irq_handler,
+                              0, "drm_mixer", mixer_ctx);
        if (ret) {
                dev_err(dev, "request interrupt failed.\n");
                return ret;
        }
-       mixer_ctx->irq = res->start;
 
        return 0;
 }
index a4c94dc..cfd9325 100644 (file)
@@ -101,6 +101,7 @@ config DRM_I915_USERPTR
 config DRM_I915_GVT
        bool "Enable Intel GVT-g graphics virtualization host support"
        depends on DRM_I915
+       depends on X86
        depends on 64BIT
        default n
        help
index 2da4aac..8ac196e 100644 (file)
@@ -825,6 +825,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
        unsigned int max_bw_point = 0, max_bw = 0;
        unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
        unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
+       bool changed = false;
        u32 mask = 0;
 
        /* FIXME earlier gens need some checks too */
@@ -868,6 +869,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
                new_bw_state->data_rate[crtc->pipe] = new_data_rate;
                new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
 
+               changed = true;
+
                drm_dbg_kms(&dev_priv->drm,
                            "pipe %c data rate %u num active planes %u\n",
                            pipe_name(crtc->pipe),
@@ -875,7 +878,19 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
                            new_bw_state->num_active_planes[crtc->pipe]);
        }
 
-       if (!new_bw_state)
+       old_bw_state = intel_atomic_get_old_bw_state(state);
+       new_bw_state = intel_atomic_get_new_bw_state(state);
+
+       if (new_bw_state &&
+           intel_can_enable_sagv(dev_priv, old_bw_state) !=
+           intel_can_enable_sagv(dev_priv, new_bw_state))
+               changed = true;
+
+       /*
+        * If none of our inputs (data rates, number of active
+        * planes, SAGV yes/no) changed then nothing to do here.
+        */
+       if (!changed)
                return 0;
 
        ret = intel_atomic_lock_global_state(&new_bw_state->base);
@@ -961,7 +976,6 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
         */
        new_bw_state->qgv_points_mask = ~allowed_points & mask;
 
-       old_bw_state = intel_atomic_get_old_bw_state(state);
        /*
         * If the actual mask had changed we need to make sure that
         * the commits are serialized(in case this is a nomodeset, nonblocking)
index 46c6eec..0ceaed1 100644 (file)
@@ -30,19 +30,19 @@ struct intel_bw_state {
         */
        u8 pipe_sagv_reject;
 
+       /* bitmask of active pipes */
+       u8 active_pipes;
+
        /*
         * Current QGV points mask, which restricts
         * some particular SAGV states, not to confuse
         * with pipe_sagv_mask.
         */
-       u8 qgv_points_mask;
+       u16 qgv_points_mask;
 
        unsigned int data_rate[I915_MAX_PIPES];
        u8 num_active_planes[I915_MAX_PIPES];
 
-       /* bitmask of active pipes */
-       u8 active_pipes;
-
        int min_cdclk;
 };
 
index bf7ce68..bb4a854 100644 (file)
@@ -10673,6 +10673,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
                vlv_wm_sanitize(dev_priv);
        } else if (DISPLAY_VER(dev_priv) >= 9) {
                skl_wm_get_hw_state(dev_priv);
+               skl_wm_sanitize(dev_priv);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                ilk_wm_get_hw_state(dev_priv);
        }
index c1439fc..3ff149d 100644 (file)
@@ -405,6 +405,7 @@ intel_drrs_init(struct intel_connector *connector,
                struct drm_display_mode *fixed_mode)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_encoder *encoder = connector->encoder;
        struct drm_display_mode *downclock_mode = NULL;
 
        INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);
@@ -416,6 +417,13 @@ intel_drrs_init(struct intel_connector *connector,
                return NULL;
        }
 
+       if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) &&
+           encoder->port != PORT_A) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "DRRS only supported on eDP port A\n");
+               return NULL;
+       }
+
        if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
                drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
                return NULL;
index 160fd2b..957feec 100644 (file)
@@ -1115,7 +1115,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
 
        /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
        if (DISPLAY_VER(i915) >= 11 &&
-           (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
+           (plane_state->view.color_plane[0].y +
+            (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
                plane_state->no_fbc_reason = "plane end Y offset misaligned";
                return false;
        }
index 0065111..4a26628 100644 (file)
@@ -360,6 +360,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                port++;
        }
 
+       /*
+        * The port numbering and mapping here is bizarre. The now-obsolete
+        * swsci spec supports ports numbered [0..4]. Port E is handled as a
+        * special case, but port F and beyond are not. The functionality is
+        * supposed to be obsolete for new platforms. Just bail out if the port
+        * number is out of bounds after mapping.
+        */
+       if (port > 4) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
+                           intel_encoder->base.base.id, intel_encoder->base.name,
+                           port_name(intel_encoder->port), port);
+               return -EINVAL;
+       }
+
        if (!enable)
                parm |= 4 << 8;
 
index 09f405e..92ff654 100644 (file)
@@ -34,7 +34,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv)
                if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy),
                                            DG2_PHY_DP_TX_ACK_MASK, 25))
                        DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n",
-                                 phy);
+                                 phy_name(phy));
        }
 }
 
index dbd7d0d..7784c30 100644 (file)
@@ -691,6 +691,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
 {
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_encoder *encoder = &dig_port->base;
+       intel_wakeref_t tc_cold_wref;
+       enum intel_display_power_domain domain;
        int active_links = 0;
 
        mutex_lock(&dig_port->tc_lock);
@@ -702,12 +704,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
 
        drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
        drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
-       if (active_links) {
-               enum intel_display_power_domain domain;
-               intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain);
 
-               dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+       tc_cold_wref = tc_cold_block(dig_port, &domain);
 
+       dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+       if (active_links) {
                if (!icl_tc_phy_is_connected(dig_port))
                        drm_dbg_kms(&i915->drm,
                                    "Port %s: PHY disconnected with %d active link(s)\n",
@@ -716,10 +717,23 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
 
                dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
                                                          &dig_port->tc_lock_power_domain);
-
-               tc_cold_unblock(dig_port, domain, tc_cold_wref);
+       } else {
+               /*
+                * TBT-alt is the default mode in any case the PHY ownership is not
+                * held (regardless of the sink's connected live state), so
+                * we'll just switch to disconnected mode from it here without
+                * a note.
+                */
+               if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+                       drm_dbg_kms(&i915->drm,
+                                   "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
+                                   dig_port->tc_port_name,
+                                   tc_port_mode_name(dig_port->tc_mode));
+               icl_tc_phy_disconnect(dig_port);
        }
 
+       tc_cold_unblock(dig_port, domain, tc_cold_wref);
+
        drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
                    dig_port->tc_port_name,
                    tc_port_mode_name(dig_port->tc_mode));
index de3fe79..1f880c8 100644 (file)
@@ -842,11 +842,9 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
        } else if (obj->mm.madv != I915_MADV_WILLNEED) {
                bo->priority = I915_TTM_PRIO_PURGE;
        } else if (!i915_gem_object_has_pages(obj)) {
-               if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
-                       bo->priority = I915_TTM_PRIO_HAS_PAGES;
+               bo->priority = I915_TTM_PRIO_NO_PAGES;
        } else {
-               if (bo->priority > I915_TTM_PRIO_NO_PAGES)
-                       bo->priority = I915_TTM_PRIO_NO_PAGES;
+               bo->priority = I915_TTM_PRIO_HAS_PAGES;
        }
 
        ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
index ee9612a..e130c82 100644 (file)
@@ -427,11 +427,17 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
 
                if (!IS_ERR(fence))
                        goto out;
-       } else if (move_deps) {
-               int err = i915_deps_sync(move_deps, ctx);
+       } else {
+               int err = PTR_ERR(fence);
+
+               if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
+                       return fence;
 
-               if (err)
-                       return ERR_PTR(err);
+               if (move_deps) {
+                       err = i915_deps_sync(move_deps, ctx);
+                       if (err)
+                               return ERR_PTR(err);
+               }
        }
 
        /* Error intercept failed or no accelerated migration to start with */
index 13b27b8..ba21ace 100644 (file)
@@ -110,7 +110,7 @@ static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
 {
        u32 request[] = {
                GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
-               SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 2),
+               SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
                id,
        };
 
index 99d1781..af79b39 100644 (file)
@@ -1148,7 +1148,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
        ops->set_pfn(se, s->shadow_page.mfn);
 }
 
-/**
+/*
  * Check if can do 2M page
  * @vgpu: target vgpu
  * @entry: target pfn's gtt entry
@@ -2193,7 +2193,7 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
 }
 
 /**
- * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
  * @vgpu: a vGPU
  * @off: register offset
  * @p_data: data will be returned to guest
index 76f1d53..3ad22bb 100644 (file)
@@ -6,6 +6,7 @@
 #ifndef __I915_MM_H__
 #define __I915_MM_H__
 
+#include <linux/bug.h>
 #include <linux/types.h>
 
 struct vm_area_struct;
index da8f82c..fc8a68f 100644 (file)
@@ -108,6 +108,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                /* Comet Lake V PCH is based on KBP, which is SPT compatible */
                return PCH_SPT;
        case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+       case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
                drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
                drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
                return PCH_ICP;
@@ -123,7 +124,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                            !IS_GEN9_BC(dev_priv));
                return PCH_TGP;
        case INTEL_PCH_JSP_DEVICE_ID_TYPE:
-       case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
                drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
                drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
                return PCH_JSP;
index 6bff775..4ba0f19 100644 (file)
@@ -50,11 +50,11 @@ enum intel_pch {
 #define INTEL_PCH_CMP2_DEVICE_ID_TYPE          0x0680
 #define INTEL_PCH_CMP_V_DEVICE_ID_TYPE         0xA380
 #define INTEL_PCH_ICP_DEVICE_ID_TYPE           0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE          0x3880
 #define INTEL_PCH_MCC_DEVICE_ID_TYPE           0x4B00
 #define INTEL_PCH_TGP_DEVICE_ID_TYPE           0xA080
 #define INTEL_PCH_TGP2_DEVICE_ID_TYPE          0x4380
 #define INTEL_PCH_JSP_DEVICE_ID_TYPE           0x4D80
-#define INTEL_PCH_JSP2_DEVICE_ID_TYPE          0x3880
 #define INTEL_PCH_ADP_DEVICE_ID_TYPE           0x7A80
 #define INTEL_PCH_ADP2_DEVICE_ID_TYPE          0x5180
 #define INTEL_PCH_ADP3_DEVICE_ID_TYPE          0x7A00
index 434b1f8..fae4f78 100644 (file)
@@ -4029,6 +4029,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
                        return ret;
        }
 
+       if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
+           intel_can_enable_sagv(dev_priv, old_bw_state)) {
+               ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+               if (ret)
+                       return ret;
+       } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+               ret = intel_atomic_lock_global_state(&new_bw_state->base);
+               if (ret)
+                       return ret;
+       }
+
        for_each_new_intel_crtc_in_state(state, crtc,
                                         new_crtc_state, i) {
                struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
@@ -4044,17 +4055,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
                        intel_can_enable_sagv(dev_priv, new_bw_state);
        }
 
-       if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
-           intel_can_enable_sagv(dev_priv, old_bw_state)) {
-               ret = intel_atomic_serialize_global_state(&new_bw_state->base);
-               if (ret)
-                       return ret;
-       } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
-               ret = intel_atomic_lock_global_state(&new_bw_state->base);
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -4717,6 +4717,10 @@ static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
 };
 
 static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+       /*
+        * Keep the join_mbus cases first so check_mbus_joined()
+        * will prefer them over the !join_mbus cases.
+        */
        {
                .active_pipes = BIT(PIPE_A),
                .dbuf_mask = {
@@ -4731,6 +4735,20 @@ static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
                },
                .join_mbus = true,
        },
+       {
+               .active_pipes = BIT(PIPE_A),
+               .dbuf_mask = {
+                       [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+               },
+               .join_mbus = false,
+       },
+       {
+               .active_pipes = BIT(PIPE_B),
+               .dbuf_mask = {
+                       [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+               },
+               .join_mbus = false,
+       },
        {
                .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
                .dbuf_mask = {
@@ -4835,7 +4853,7 @@ static bool check_mbus_joined(u8 active_pipes,
 {
        int i;
 
-       for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+       for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
                if (dbuf_slices[i].active_pipes == active_pipes)
                        return dbuf_slices[i].join_mbus;
        }
@@ -4847,13 +4865,14 @@ static bool adlp_check_mbus_joined(u8 active_pipes)
        return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
 }
 
-static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
                              const struct dbuf_slice_conf_entry *dbuf_slices)
 {
        int i;
 
-       for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
-               if (dbuf_slices[i].active_pipes == active_pipes)
+       for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+               if (dbuf_slices[i].active_pipes == active_pipes &&
+                   dbuf_slices[i].join_mbus == join_mbus)
                        return dbuf_slices[i].dbuf_mask[pipe];
        }
        return 0;
@@ -4864,7 +4883,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
  * returns correspondent DBuf slice mask as stated in BSpec for particular
  * platform.
  */
-static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
        /*
         * FIXME: For ICL this is still a bit unclear as prev BSpec revision
@@ -4878,37 +4897,41 @@ static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
         * still here - we will need it once those additional constraints
         * pop up.
         */
-       return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+       return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+                                  icl_allowed_dbufs);
 }
 
-static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
-       return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+       return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+                                  tgl_allowed_dbufs);
 }
 
-static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
-       return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
+       return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+                                  adlp_allowed_dbufs);
 }
 
-static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
-       return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs);
+       return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+                                  dg2_allowed_dbufs);
 }
 
-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
 
        if (IS_DG2(dev_priv))
-               return dg2_compute_dbuf_slices(pipe, active_pipes);
+               return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
        else if (IS_ALDERLAKE_P(dev_priv))
-               return adlp_compute_dbuf_slices(pipe, active_pipes);
+               return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
        else if (DISPLAY_VER(dev_priv) == 12)
-               return tgl_compute_dbuf_slices(pipe, active_pipes);
+               return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
        else if (DISPLAY_VER(dev_priv) == 11)
-               return icl_compute_dbuf_slices(pipe, active_pipes);
+               return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
        /*
         * For anything else just return one slice yet.
         * Should be extended for other platforms.
@@ -6127,11 +6150,16 @@ skl_compute_ddb(struct intel_atomic_state *state)
                        return ret;
        }
 
+       if (IS_ALDERLAKE_P(dev_priv))
+               new_dbuf_state->joined_mbus =
+                       adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                enum pipe pipe = crtc->pipe;
 
                new_dbuf_state->slices[pipe] =
-                       skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+                       skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+                                               new_dbuf_state->joined_mbus);
 
                if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
                        continue;
@@ -6143,9 +6171,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
 
        new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
 
-       if (IS_ALDERLAKE_P(dev_priv))
-               new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes);
-
        if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
            old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
                ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
@@ -6626,6 +6651,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
                enum pipe pipe = crtc->pipe;
                unsigned int mbus_offset;
                enum plane_id plane_id;
+               u8 slices;
 
                skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
                crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
@@ -6645,19 +6671,22 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
                        skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
                }
 
-               dbuf_state->slices[pipe] =
-                       skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
-
                dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
 
                /*
                 * Used for checking overlaps, so we need absolute
                 * offsets instead of MBUS relative offsets.
                 */
-               mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]);
+               slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+                                                dbuf_state->joined_mbus);
+               mbus_offset = mbus_ddb_offset(dev_priv, slices);
                crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
                crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
 
+               /* The slices actually used by the planes on the pipe */
+               dbuf_state->slices[pipe] =
+                       skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
+
                drm_dbg_kms(&dev_priv->drm,
                            "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
                            crtc->base.base.id, crtc->base.name,
@@ -6669,6 +6698,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
        dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
 }
 
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+       const struct intel_dbuf_state *dbuf_state =
+               to_intel_dbuf_state(i915->dbuf.obj.state);
+       struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+       struct intel_crtc *crtc;
+
+       for_each_intel_crtc(&i915->drm, crtc) {
+               const struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
+
+               entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+       }
+
+       for_each_intel_crtc(&i915->drm, crtc) {
+               const struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
+               u8 slices;
+
+               slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+                                                dbuf_state->joined_mbus);
+               if (dbuf_state->slices[crtc->pipe] & ~slices)
+                       return true;
+
+               if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+                                               I915_MAX_PIPES, crtc->pipe))
+                       return true;
+       }
+
+       return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+       struct intel_crtc *crtc;
+
+       /*
+        * On TGL/RKL (at least) the BIOS likes to assign the planes
+        * to the wrong DBUF slices. This will cause an infinite loop
+        * in skl_commit_modeset_enables() as it can't find a way to
+        * transition between the old bogus DBUF layout to the new
+        * proper DBUF layout without DBUF allocation overlaps between
+        * the planes (which cannot be allowed or else the hardware
+        * may hang). If we detect a bogus DBUF layout just turn off
+        * all the planes so that skl_commit_modeset_enables() can
+        * simply ignore them.
+        */
+       if (!skl_dbuf_is_misconfigured(i915))
+               return;
+
+       drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+       for_each_intel_crtc(&i915->drm, crtc) {
+               struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+               const struct intel_plane_state *plane_state =
+                       to_intel_plane_state(plane->base.state);
+               struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
+
+               if (plane_state->uapi.visible)
+                       intel_plane_disable_noatomic(crtc, plane);
+
+               drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+               memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+       }
+}
+
 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
index 990cdca..d224365 100644 (file)
@@ -47,6 +47,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
                              struct skl_pipe_wm *out);
 void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
 void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+void skl_wm_sanitize(struct drm_i915_private *dev_priv);
 bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
                           const struct intel_bw_state *bw_state);
 void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
index 53f1ccb..64c2708 100644 (file)
@@ -68,9 +68,7 @@ static noinline depot_stack_handle_t __save_depot_stack(void)
 static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
        spin_lock_init(&rpm->debug.lock);
-
-       if (rpm->available)
-               stack_depot_init();
+       stack_depot_init();
 }
 
 static noinline depot_stack_handle_t
index 7374f19..5c2b227 100644 (file)
@@ -2,6 +2,7 @@ config DRM_IMX_DCSS
        tristate "i.MX8MQ DCSS"
        select IMX_IRQSTEER
        select DRM_KMS_HELPER
+       select DRM_GEM_CMA_HELPER
        select VIDEOMODE_HELPERS
        depends on DRM && ARCH_MXC && ARM64
        help
index 5d90d2e..bced4c7 100644 (file)
@@ -786,18 +786,101 @@ void mtk_dsi_ddp_stop(struct device *dev)
        mtk_dsi_poweroff(dsi);
 }
 
+static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+       int ret;
+
+       ret = drm_simple_encoder_init(drm, &dsi->encoder,
+                                     DRM_MODE_ENCODER_DSI);
+       if (ret) {
+               DRM_ERROR("Failed to encoder init to drm\n");
+               return ret;
+       }
+
+       dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
+
+       ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
+                               DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+       if (ret)
+               goto err_cleanup_encoder;
+
+       dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
+       if (IS_ERR(dsi->connector)) {
+               DRM_ERROR("Unable to create bridge connector\n");
+               ret = PTR_ERR(dsi->connector);
+               goto err_cleanup_encoder;
+       }
+       drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
+
+       return 0;
+
+err_cleanup_encoder:
+       drm_encoder_cleanup(&dsi->encoder);
+       return ret;
+}
+
+static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+       int ret;
+       struct drm_device *drm = data;
+       struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+       ret = mtk_dsi_encoder_init(drm, dsi);
+       if (ret)
+               return ret;
+
+       return device_reset_optional(dev);
+}
+
+static void mtk_dsi_unbind(struct device *dev, struct device *master,
+                          void *data)
+{
+       struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+       drm_encoder_cleanup(&dsi->encoder);
+}
+
+static const struct component_ops mtk_dsi_component_ops = {
+       .bind = mtk_dsi_bind,
+       .unbind = mtk_dsi_unbind,
+};
+
 static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
                               struct mipi_dsi_device *device)
 {
        struct mtk_dsi *dsi = host_to_dsi(host);
+       struct device *dev = host->dev;
+       int ret;
 
        dsi->lanes = device->lanes;
        dsi->format = device->format;
        dsi->mode_flags = device->mode_flags;
+       dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+       if (IS_ERR(dsi->next_bridge))
+               return PTR_ERR(dsi->next_bridge);
+
+       drm_bridge_add(&dsi->bridge);
+
+       ret = component_add(host->dev, &mtk_dsi_component_ops);
+       if (ret) {
+               DRM_ERROR("failed to add dsi_host component: %d\n", ret);
+               drm_bridge_remove(&dsi->bridge);
+               return ret;
+       }
 
        return 0;
 }
 
+static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
+                              struct mipi_dsi_device *device)
+{
+       struct mtk_dsi *dsi = host_to_dsi(host);
+
+       component_del(host->dev, &mtk_dsi_component_ops);
+       drm_bridge_remove(&dsi->bridge);
+       return 0;
+}
+
 static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
 {
        int ret;
@@ -938,73 +1021,14 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
 
 static const struct mipi_dsi_host_ops mtk_dsi_ops = {
        .attach = mtk_dsi_host_attach,
+       .detach = mtk_dsi_host_detach,
        .transfer = mtk_dsi_host_transfer,
 };
 
-static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
-{
-       int ret;
-
-       ret = drm_simple_encoder_init(drm, &dsi->encoder,
-                                     DRM_MODE_ENCODER_DSI);
-       if (ret) {
-               DRM_ERROR("Failed to encoder init to drm\n");
-               return ret;
-       }
-
-       dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
-
-       ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
-                               DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-       if (ret)
-               goto err_cleanup_encoder;
-
-       dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
-       if (IS_ERR(dsi->connector)) {
-               DRM_ERROR("Unable to create bridge connector\n");
-               ret = PTR_ERR(dsi->connector);
-               goto err_cleanup_encoder;
-       }
-       drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
-
-       return 0;
-
-err_cleanup_encoder:
-       drm_encoder_cleanup(&dsi->encoder);
-       return ret;
-}
-
-static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
-{
-       int ret;
-       struct drm_device *drm = data;
-       struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
-       ret = mtk_dsi_encoder_init(drm, dsi);
-       if (ret)
-               return ret;
-
-       return device_reset_optional(dev);
-}
-
-static void mtk_dsi_unbind(struct device *dev, struct device *master,
-                          void *data)
-{
-       struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
-       drm_encoder_cleanup(&dsi->encoder);
-}
-
-static const struct component_ops mtk_dsi_component_ops = {
-       .bind = mtk_dsi_bind,
-       .unbind = mtk_dsi_unbind,
-};
-
 static int mtk_dsi_probe(struct platform_device *pdev)
 {
        struct mtk_dsi *dsi;
        struct device *dev = &pdev->dev;
-       struct drm_panel *panel;
        struct resource *regs;
        int irq_num;
        int ret;
@@ -1021,19 +1045,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
                return ret;
        }
 
-       ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
-                                         &panel, &dsi->next_bridge);
-       if (ret)
-               goto err_unregister_host;
-
-       if (panel) {
-               dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
-               if (IS_ERR(dsi->next_bridge)) {
-                       ret = PTR_ERR(dsi->next_bridge);
-                       goto err_unregister_host;
-               }
-       }
-
        dsi->driver_data = of_device_get_match_data(dev);
 
        dsi->engine_clk = devm_clk_get(dev, "engine");
@@ -1098,14 +1109,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
        dsi->bridge.of_node = dev->of_node;
        dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
 
-       drm_bridge_add(&dsi->bridge);
-
-       ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add component: %d\n", ret);
-               goto err_unregister_host;
-       }
-
        return 0;
 
 err_unregister_host:
@@ -1118,8 +1121,6 @@ static int mtk_dsi_remove(struct platform_device *pdev)
        struct mtk_dsi *dsi = platform_get_drvdata(pdev);
 
        mtk_output_dsi_disable(dsi);
-       drm_bridge_remove(&dsi->bridge);
-       component_del(&pdev->dev, &mtk_dsi_component_ops);
        mipi_dsi_host_unregister(&dsi->host);
 
        return 0;
index 9e46db5..3c08f98 100644 (file)
@@ -588,6 +588,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
                err = panel_dpi_probe(dev, panel);
                if (err)
                        goto free_ddc;
+               desc = panel->desc;
        } else {
                if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
                        panel_simple_parse_panel_timing_node(dev, panel, &dt);
index 0fce73b..70bd84b 100644 (file)
@@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
         * so don't register a backlight device
         */
        if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
-           (rdev->pdev->device == 0x6741))
+           (rdev->pdev->device == 0x6741) &&
+           !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
                return;
 
        if (!radeon_encoder->enc_priv)
index 377f9cd..84013fa 100644 (file)
@@ -470,8 +470,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
        int32_t *msg, msg_type, handle;
        unsigned img_size = 0;
        void *ptr;
-
-       int i, r;
+       long r;
+       int i;
 
        if (offset & 0x3F) {
                DRM_ERROR("UVD messages must be 64 byte aligned!\n");
@@ -481,13 +481,13 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
        r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
                                  MAX_SCHEDULE_TIMEOUT);
        if (r <= 0) {
-               DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+               DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
                return r ? r : -ETIME;
        }
 
        r = radeon_bo_kmap(bo, &ptr);
        if (r) {
-               DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+               DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
                return r;
        }
 
index 830bdd5..8677c82 100644 (file)
@@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
                return ret;
        }
 
-       ret = clk_prepare_enable(hdmi->vpll_clk);
-       if (ret) {
-               DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
-                             ret);
-               return ret;
-       }
-
        hdmi->phy = devm_phy_optional_get(dev, "hdmi");
        if (IS_ERR(hdmi->phy)) {
                ret = PTR_ERR(hdmi->phy);
@@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
                return ret;
        }
 
+       ret = clk_prepare_enable(hdmi->vpll_clk);
+       if (ret) {
+               DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
+                             ret);
+               return ret;
+       }
+
        drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
        drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
index 1f7353f..798b542 100644 (file)
@@ -902,6 +902,7 @@ static const struct vop_win_phy rk3399_win01_data = {
        .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
        .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
        .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+       .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
        .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
        .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
        .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
@@ -912,6 +913,7 @@ static const struct vop_win_phy rk3399_win01_data = {
        .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
        .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
        .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+       .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
 };
 
 /*
@@ -922,11 +924,11 @@ static const struct vop_win_phy rk3399_win01_data = {
 static const struct vop_win_data rk3399_vop_win_data[] = {
        { .base = 0x00, .phy = &rk3399_win01_data,
          .type = DRM_PLANE_TYPE_PRIMARY },
-       { .base = 0x40, .phy = &rk3288_win01_data,
+       { .base = 0x40, .phy = &rk3368_win01_data,
          .type = DRM_PLANE_TYPE_OVERLAY },
-       { .base = 0x00, .phy = &rk3288_win23_data,
+       { .base = 0x00, .phy = &rk3368_win23_data,
          .type = DRM_PLANE_TYPE_OVERLAY },
-       { .base = 0x50, .phy = &rk3288_win23_data,
+       { .base = 0x50, .phy = &rk3368_win23_data,
          .type = DRM_PLANE_TYPE_CURSOR },
 };
 
index 8cf5aeb..201f517 100644 (file)
@@ -5,6 +5,7 @@ config DRM_TEGRA
        depends on COMMON_CLK
        depends on DRM
        depends on OF
+       select DRM_DP_AUX_BUS
        select DRM_KMS_HELPER
        select DRM_MIPI_DSI
        select DRM_PANEL
index 1f96e41..d7a731d 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/workqueue.h>
 
 #include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_aux_bus.h>
 #include <drm/drm_panel.h>
 
 #include "dp.h"
@@ -570,6 +571,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
        list_add_tail(&dpaux->list, &dpaux_list);
        mutex_unlock(&dpaux_lock);
 
+       err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
+       if (err < 0) {
+               dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
+               return err;
+       }
+
        return 0;
 }
 
index 223ab2c..3762d87 100644 (file)
@@ -63,7 +63,7 @@ static void falcon_copy_firmware_image(struct falcon *falcon,
 
        /* copy the whole thing taking into account endianness */
        for (i = 0; i < firmware->size / sizeof(u32); i++)
-               virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
+               virt[i] = le32_to_cpu(((__le32 *)firmware->data)[i]);
 }
 
 static int falcon_parse_firmware_image(struct falcon *falcon)
index 287dbc8..783890e 100644 (file)
@@ -525,9 +525,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
        if (ret)
                return ret;
 
-       ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
-       if (ret)
-               return ret;
+       /*
+        * post_crtc_powerdown will have called pm_runtime_put, so we
+        * don't need it here otherwise we'll get the reference counting
+        * wrong.
+        */
 
        return 0;
 }
@@ -671,7 +673,6 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
                const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
                struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
 
-               mode = &crtc_state->adjusted_mode;
                if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
                        vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
                                                  mode->clock * 9 / 10) * 1000;
index 053fbaf..3a1626f 100644 (file)
@@ -196,14 +196,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
                if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
                        connected = true;
        } else {
-               unsigned long flags;
-               u32 hotplug;
-
-               spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
-               hotplug = HDMI_READ(HDMI_HOTPLUG);
-               spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
-
-               if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED)
+               if (vc4_hdmi->variant->hp_detect &&
+                   vc4_hdmi->variant->hp_detect(vc4_hdmi))
                        connected = true;
        }
 
@@ -1251,6 +1245,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
        unsigned long long tmds_rate;
 
        if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+           !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
            ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
             (mode->hsync_end % 2) || (mode->htotal % 2)))
                return -EINVAL;
@@ -1298,6 +1293,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 
        if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+           !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
            ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
             (mode->hsync_end % 2) || (mode->htotal % 2)))
                return MODE_H_ILLEGAL;
@@ -1343,6 +1339,18 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
        return channel_map;
 }
 
+static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
+{
+       unsigned long flags;
+       u32 hotplug;
+
+       spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+       hotplug = HDMI_READ(HDMI_HOTPLUG);
+       spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+       return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED);
+}
+
 /* HDMI audio codec callbacks */
 static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
                                         unsigned int samplerate)
@@ -1741,6 +1749,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
                dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev));
                return PTR_ERR(codec_pdev);
        }
+       vc4_hdmi->audio.codec_pdev = codec_pdev;
 
        dai_link->cpus          = &vc4_hdmi->audio.cpu;
        dai_link->codecs        = &vc4_hdmi->audio.codec;
@@ -1780,6 +1789,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
 
 }
 
+static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi)
+{
+       platform_device_unregister(vc4_hdmi->audio.codec_pdev);
+       vc4_hdmi->audio.codec_pdev = NULL;
+}
+
 static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
 {
        struct vc4_hdmi *vc4_hdmi = priv;
@@ -2504,7 +2519,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
         * vc4_hdmi_disable_scrambling() will thus run at boot, make
         * sure it's disabled, and avoid any inconsistency.
         */
-       vc4_hdmi->scdc_enabled = true;
+       if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK)
+               vc4_hdmi->scdc_enabled = true;
 
        ret = variant->init_resources(vc4_hdmi);
        if (ret)
@@ -2651,6 +2667,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
        kfree(vc4_hdmi->hdmi_regset.regs);
        kfree(vc4_hdmi->hd_regset.regs);
 
+       vc4_hdmi_audio_exit(vc4_hdmi);
        vc4_hdmi_cec_exit(vc4_hdmi);
        vc4_hdmi_hotplug_exit(vc4_hdmi);
        vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
@@ -2723,6 +2740,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
        .phy_rng_disable        = vc5_hdmi_phy_rng_disable,
        .channel_map            = vc5_hdmi_channel_map,
        .supports_hdr           = true,
+       .hp_detect              = vc5_hdmi_hp_detect,
 };
 
 static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
@@ -2751,6 +2769,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
        .phy_rng_disable        = vc5_hdmi_phy_rng_disable,
        .channel_map            = vc5_hdmi_channel_map,
        .supports_hdr           = true,
+       .hp_detect              = vc5_hdmi_hp_detect,
 };
 
 static const struct of_device_id vc4_hdmi_dt_match[] = {
index 36c0b08..6ffdd4e 100644 (file)
@@ -102,6 +102,9 @@ struct vc4_hdmi_variant {
 
        /* Enables HDR metadata */
        bool supports_hdr;
+
+       /* Callback for hardware specific hotplug detect */
+       bool (*hp_detect)(struct vc4_hdmi *vc4_hdmi);
 };
 
 /* HDMI audio information */
@@ -113,6 +116,7 @@ struct vc4_hdmi_audio {
        struct snd_soc_dai_link_component platform;
        struct snd_dmaengine_dai_dma_data dma_data;
        struct hdmi_audio_infoframe infoframe;
+       struct platform_device *codec_pdev;
        bool streaming;
 };
 
index e08e331..f87a870 100644 (file)
@@ -137,8 +137,15 @@ void host1x_syncpt_restore(struct host1x *host)
        struct host1x_syncpt *sp_base = host->syncpt;
        unsigned int i;
 
-       for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
+       for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
+               /*
+                * Unassign syncpt from channels for purposes of Tegra186
+                * syncpoint protection. This prevents any channel from
+                * accessing it until it is reassigned.
+                */
+               host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL);
                host1x_hw_syncpt_restore(host, sp_base + i);
+       }
 
        for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
                host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
@@ -227,27 +234,12 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
        void *ref;
        struct host1x_waitlist *waiter;
        int err = 0, check_count = 0;
-       u32 val;
 
        if (value)
-               *value = 0;
-
-       /* first check cache */
-       if (host1x_syncpt_is_expired(sp, thresh)) {
-               if (value)
-                       *value = host1x_syncpt_load(sp);
+               *value = host1x_syncpt_load(sp);
 
+       if (host1x_syncpt_is_expired(sp, thresh))
                return 0;
-       }
-
-       /* try to read from register */
-       val = host1x_hw_syncpt_load(sp->host, sp);
-       if (host1x_syncpt_is_expired(sp, thresh)) {
-               if (value)
-                       *value = val;
-
-               goto done;
-       }
 
        if (!timeout) {
                err = -EAGAIN;
@@ -352,13 +344,6 @@ int host1x_syncpt_init(struct host1x *host)
        for (i = 0; i < host->info->nb_pts; i++) {
                syncpt[i].id = i;
                syncpt[i].host = host;
-
-               /*
-                * Unassign syncpt from channels for purposes of Tegra186
-                * syncpoint protection. This prevents any channel from
-                * accessing it until it is reassigned.
-                */
-               host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL);
        }
 
        for (i = 0; i < host->info->nb_bases; i++)
index 2503be0..19fa734 100644 (file)
@@ -37,11 +37,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_
 {
        union cmd_response cmd_resp;
 
-       /* Get response with status within a max of 800 ms timeout */
+       /* Get response with status within a max of 1600 ms timeout */
        if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
                                (cmd_resp.response_v2.response == sensor_sts &&
                                cmd_resp.response_v2.status == 0 && (sid == 0xff ||
-                               cmd_resp.response_v2.sensor_id == sid)), 500, 800000))
+                               cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
                return cmd_resp.response_v2.response;
 
        return SENSOR_DISABLED;
@@ -53,6 +53,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
 
        cmd_base.ul = 0;
        cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
+       cmd_base.cmd_v2.intr_disable = 1;
        cmd_base.cmd_v2.period = info.period;
        cmd_base.cmd_v2.sensor_id = info.sensor_idx;
        cmd_base.cmd_v2.length = 16;
@@ -70,6 +71,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
 
        cmd_base.ul = 0;
        cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
+       cmd_base.cmd_v2.intr_disable = 1;
        cmd_base.cmd_v2.period = 0;
        cmd_base.cmd_v2.sensor_id = sensor_idx;
        cmd_base.cmd_v2.length  = 16;
@@ -83,12 +85,51 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
        union sfh_cmd_base cmd_base;
 
        cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
+       cmd_base.cmd_v2.intr_disable = 1;
        cmd_base.cmd_v2.period = 0;
        cmd_base.cmd_v2.sensor_id = 0;
 
        writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
 }
 
+static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
+{
+       if (readl(privdata->mmio + AMD_P2C_MSG(4))) {
+               writel(0, privdata->mmio + AMD_P2C_MSG(4));
+               writel(0xf, privdata->mmio + AMD_P2C_MSG(5));
+       }
+}
+
+static void amd_sfh_clear_intr(struct amd_mp2_dev *privdata)
+{
+       if (privdata->mp2_ops->clear_intr)
+               privdata->mp2_ops->clear_intr(privdata);
+}
+
+static irqreturn_t amd_sfh_irq_handler(int irq, void *data)
+{
+       amd_sfh_clear_intr(data);
+
+       return IRQ_HANDLED;
+}
+
+static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
+{
+       int rc;
+
+       pci_intx(privdata->pdev, true);
+
+       rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq,
+                             amd_sfh_irq_handler, 0, DRIVER_NAME, privdata);
+       if (rc) {
+               dev_err(&privdata->pdev->dev, "failed to request irq %d err=%d\n",
+                       privdata->pdev->irq, rc);
+               return rc;
+       }
+
+       return 0;
+}
+
 void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
 {
        union sfh_cmd_param cmd_param;
@@ -193,6 +234,8 @@ static void amd_mp2_pci_remove(void *privdata)
        struct amd_mp2_dev *mp2 = privdata;
        amd_sfh_hid_client_deinit(privdata);
        mp2->mp2_ops->stop_all(mp2);
+       pci_intx(mp2->pdev, false);
+       amd_sfh_clear_intr(mp2);
 }
 
 static const struct amd_mp2_ops amd_sfh_ops_v2 = {
@@ -200,6 +243,8 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
        .stop = amd_stop_sensor_v2,
        .stop_all = amd_stop_all_sensor_v2,
        .response = amd_sfh_wait_response_v2,
+       .clear_intr = amd_sfh_clear_intr_v2,
+       .init_intr = amd_sfh_irq_init_v2,
 };
 
 static const struct amd_mp2_ops amd_sfh_ops = {
@@ -225,6 +270,14 @@ static void mp2_select_ops(struct amd_mp2_dev *privdata)
        }
 }
 
+static int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
+{
+       if (privdata->mp2_ops->init_intr)
+               return privdata->mp2_ops->init_intr(privdata);
+
+       return 0;
+}
+
 static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct amd_mp2_dev *privdata;
@@ -261,9 +314,20 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
 
        mp2_select_ops(privdata);
 
+       rc = amd_sfh_irq_init(privdata);
+       if (rc) {
+               dev_err(&pdev->dev, "amd_sfh_irq_init failed\n");
+               return rc;
+       }
+
        rc = amd_sfh_hid_client_init(privdata);
-       if (rc)
+       if (rc) {
+               amd_sfh_clear_intr(privdata);
+               dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n");
                return rc;
+       }
+
+       amd_sfh_clear_intr(privdata);
 
        return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
 }
@@ -290,6 +354,9 @@ static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
                }
        }
 
+       schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+       amd_sfh_clear_intr(mp2);
+
        return 0;
 }
 
@@ -312,6 +379,9 @@ static int __maybe_unused amd_mp2_pci_suspend(struct device *dev)
                }
        }
 
+       cancel_delayed_work_sync(&cl_data->work_buffer);
+       amd_sfh_clear_intr(mp2);
+
        return 0;
 }
 
index ae30e05..97b9986 100644 (file)
@@ -49,7 +49,7 @@ union sfh_cmd_base {
        } s;
        struct {
                u32 cmd_id : 4;
-               u32 intr_enable : 1;
+               u32 intr_disable : 1;
                u32 rsvd1 : 3;
                u32 length : 7;
                u32 mem_type : 1;
@@ -141,5 +141,7 @@ struct amd_mp2_ops {
         void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx);
         void (*stop_all)(struct amd_mp2_dev *privdata);
         int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
+        void (*clear_intr)(struct amd_mp2_dev *privdata);
+        int (*init_intr)(struct amd_mp2_dev *privdata);
 };
 #endif
index be41f83..76095bd 100644 (file)
@@ -27,6 +27,7 @@
 #define HID_USAGE_SENSOR_STATE_READY_ENUM                             0x02
 #define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM                      0x05
 #define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM                      0x04
+#define ILLUMINANCE_MASK                                       GENMASK(14, 0)
 
 int get_report_descriptor(int sensor_idx, u8 *rep_desc)
 {
@@ -246,7 +247,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_
                get_common_inputs(&als_input.common_property, report_id);
                /* For ALS ,V2 Platforms uses C2P_MSG5 register instead of DRAM access method */
                if (supported_input == V2_STATUS)
-                       als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5));
+                       als_input.illuminance_value =
+                               readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK;
                else
                        als_input.illuminance_value =
                                (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
index 24802a4..7dc89dc 100644 (file)
@@ -691,49 +691,49 @@ static const struct hid_device_id apple_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
-               .driver_data = APPLE_HAS_FN },
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
-               .driver_data = APPLE_HAS_FN },
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
-               .driver_data = APPLE_HAS_FN },
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
-               .driver_data = APPLE_HAS_FN },
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
-               .driver_data = APPLE_HAS_FN },
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
-               .driver_data = APPLE_HAS_FN },
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
-               .driver_data = APPLE_HAS_FN },
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
-               .driver_data = APPLE_HAS_FN },
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
index 26c31d7..81e7e40 100644 (file)
@@ -860,7 +860,9 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_F22] = "F22",                      [KEY_F23] = "F23",
        [KEY_F24] = "F24",                      [KEY_PLAYCD] = "PlayCD",
        [KEY_PAUSECD] = "PauseCD",              [KEY_PROG3] = "Prog3",
-       [KEY_PROG4] = "Prog4",                  [KEY_SUSPEND] = "Suspend",
+       [KEY_PROG4] = "Prog4",
+       [KEY_ALL_APPLICATIONS] = "AllApplications",
+       [KEY_SUSPEND] = "Suspend",
        [KEY_CLOSE] = "Close",                  [KEY_PLAY] = "Play",
        [KEY_FASTFORWARD] = "FastForward",      [KEY_BASSBOOST] = "BassBoost",
        [KEY_PRINT] = "Print",                  [KEY_HP] = "HP",
@@ -969,6 +971,7 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_ASSISTANT] = "Assistant",
        [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
        [KEY_EMOJI_PICKER] = "EmojiPicker",
+       [KEY_DICTATE] = "Dictate",
        [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
        [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
        [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
index 8e960d7..9b42b0c 100644 (file)
@@ -262,6 +262,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        return 0;
 err_free:
+       usb_put_dev(udev);
        kfree(priv);
        return ret;
 }
index 8597503..78bd3dd 100644 (file)
 #define USB_VENDOR_ID_UGTIZER                  0x2179
 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610    0x0053
 #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040    0x0077
+#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540    0x0004
 
 #define USB_VENDOR_ID_VIEWSONIC                        0x0543
 #define USB_DEVICE_ID_VIEWSONIC_PD1011         0xe621
index 112901d..56ec273 100644 (file)
@@ -992,6 +992,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x0cd: map_key_clear(KEY_PLAYPAUSE);       break;
                case 0x0cf: map_key_clear(KEY_VOICECOMMAND);    break;
 
+               case 0x0d8: map_key_clear(KEY_DICTATE);         break;
                case 0x0d9: map_key_clear(KEY_EMOJI_PICKER);    break;
 
                case 0x0e0: map_abs_clear(ABS_VOLUME);          break;
@@ -1083,6 +1084,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 
                case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break;
 
+               case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS);        break;
+
                case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV);             break;
                case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT);             break;
                case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP);                break;
index 9af1dc8..c066ba9 100644 (file)
@@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
index b4dad66..ec6c73f 100644 (file)
@@ -27,7 +27,6 @@ struct i2c_hid_of_goodix {
 
        struct regulator *vdd;
        struct notifier_block nb;
-       struct mutex regulator_mutex;
        struct gpio_desc *reset_gpio;
        const struct goodix_i2c_hid_timing_data *timings;
 };
@@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
                container_of(nb, struct i2c_hid_of_goodix, nb);
        int ret = NOTIFY_OK;
 
-       mutex_lock(&ihid_goodix->regulator_mutex);
-
        switch (event) {
        case REGULATOR_EVENT_PRE_DISABLE:
                gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
@@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
                break;
        }
 
-       mutex_unlock(&ihid_goodix->regulator_mutex);
-
        return ret;
 }
 
@@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
        if (!ihid_goodix)
                return -ENOMEM;
 
-       mutex_init(&ihid_goodix->regulator_mutex);
-
        ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
        ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
 
@@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
         *   long. Holding the controller in reset apparently draws extra
         *   power.
         */
-       mutex_lock(&ihid_goodix->regulator_mutex);
        ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
        ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
-       if (ret) {
-               mutex_unlock(&ihid_goodix->regulator_mutex);
+       if (ret)
                return dev_err_probe(&client->dev, ret,
                        "regulator notifier request failed\n");
-       }
 
        /*
         * If someone else is holding the regulator on (or the regulator is
         * an always-on one) we might never be told to deassert reset. Do it
-        * now. Here we'll assume that someone else might have _just
-        * barely_ turned the regulator on so we'll do the full
-        * "post_power_delay" just in case.
+        * now... and temporarily bump the regulator reference count just to
+        * make sure it is impossible for this to race with our own notifier!
+        * We also assume that someone else might have _just barely_ turned
+        * the regulator on so we'll do the full "post_power_delay" just in
+        * case.
         */
-       if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd))
+       if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) {
+               ret = regulator_enable(ihid_goodix->vdd);
+               if (ret)
+                       return ret;
                goodix_i2c_hid_deassert_reset(ihid_goodix, true);
-       mutex_unlock(&ihid_goodix->regulator_mutex);
+               regulator_disable(ihid_goodix->vdd);
+       }
 
        return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
 }
index eb2833d..8328851 100644 (file)
@@ -13,7 +13,7 @@
 #include "hv_utils_transport.h"
 
 static DEFINE_SPINLOCK(hvt_list_lock);
-static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);
+static LIST_HEAD(hvt_list);
 
 static void hvt_reset(struct hvutil_transport *hvt)
 {
index 17bf55f..12a2b37 100644 (file)
@@ -2028,8 +2028,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
        kobj->kset = dev->channels_kset;
        ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
                                   "%u", relid);
-       if (ret)
+       if (ret) {
+               kobject_put(kobj);
                return ret;
+       }
 
        ret = sysfs_create_group(kobj, &vmbus_chan_group);
 
@@ -2038,6 +2040,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
                 * The calling functions' error handling paths will cleanup the
                 * empty channel directory.
                 */
+               kobject_put(kobj);
                dev_err(device, "Unable to set up channel sysfs files\n");
                return ret;
        }
@@ -2079,7 +2082,6 @@ struct hv_device *vmbus_device_create(const guid_t *type,
        return child_device_obj;
 }
 
-static u64 vmbus_dma_mask = DMA_BIT_MASK(64);
 /*
  * vmbus_device_register - Register the child device
  */
@@ -2120,8 +2122,9 @@ int vmbus_device_register(struct hv_device *child_device_obj)
        }
        hv_debug_add_dev_dir(child_device_obj);
 
-       child_device_obj->device.dma_mask = &vmbus_dma_mask;
        child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+       child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+       dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
        return 0;
 
 err_kset_unregister:
index 3501a3e..3ae9619 100644 (file)
@@ -214,12 +214,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)
 
        tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
                                                   &hwmon_thermal_ops);
-       /*
-        * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
-        * so ignore that error but forward any other error.
-        */
-       if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
-               return PTR_ERR(tzd);
+       if (IS_ERR(tzd)) {
+               if (PTR_ERR(tzd) != -ENODEV)
+                       return PTR_ERR(tzd);
+               dev_info(dev, "temp%d_input not attached to any thermal zone\n",
+                        index + 1);
+               devm_kfree(dev, tdata);
+               return 0;
+       }
 
        err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node);
        if (err)
index 414204f..9c9e9f4 100644 (file)
@@ -59,7 +59,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
        [NTC_NCP15XH103]      = { "ncp15xh103",      TYPE_NCPXXXH103 },
        [NTC_NCP18WB473]      = { "ncp18wb473",      TYPE_NCPXXWB473 },
        [NTC_NCP21WB473]      = { "ncp21wb473",      TYPE_NCPXXWB473 },
-       [NTC_SSG1404001221]   = { "ssg1404-001221",  TYPE_NCPXXWB473 },
+       [NTC_SSG1404001221]   = { "ssg1404_001221",  TYPE_NCPXXWB473 },
        [NTC_LAST]            = { },
 };
 
index 776ee22..ac2fbee 100644 (file)
@@ -911,6 +911,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
                pmbus_update_sensor_data(client, s2);
 
        regval = status & mask;
+       if (regval) {
+               ret = pmbus_write_byte_data(client, page, reg, regval);
+               if (ret)
+                       goto unlock;
+       }
        if (s1 && s2) {
                s64 v1, v2;
 
index 42da31c..8a6c6ee 100644 (file)
@@ -488,7 +488,7 @@ config I2C_BRCMSTB
 
 config I2C_CADENCE
        tristate "Cadence I2C Controller"
-       depends on ARCH_ZYNQ || ARM64 || XTENSA
+       depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST
        help
          Say yes here to select Cadence I2C Host Controller. This controller is
          e.g. used by Xilinx Zynq.
@@ -680,7 +680,7 @@ config I2C_IMG
 
 config I2C_IMX
        tristate "IMX I2C interface"
-       depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
+       depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST
        select I2C_SLAVE
        help
          Say Y here if you want to use the IIC bus controller on
@@ -935,7 +935,7 @@ config I2C_QCOM_GENI
 
 config I2C_QUP
        tristate "Qualcomm QUP based I2C controller"
-       depends on ARCH_QCOM
+       depends on ARCH_QCOM || COMPILE_TEST
        help
          If you say yes to this option, support will be included for the
          built-in I2C interface on the Qualcomm SoCs.
index dfc5340..5149454 100644 (file)
 #define BCM2835_I2C_FIFO       0x10
 #define BCM2835_I2C_DIV                0x14
 #define BCM2835_I2C_DEL                0x18
+/*
+ * 16-bit field for the number of SCL cycles to wait after rising SCL
+ * before deciding the slave is not responding. 0 disables the
+ * timeout detection.
+ */
 #define BCM2835_I2C_CLKT       0x1c
 
 #define BCM2835_I2C_C_READ     BIT(0)
@@ -474,6 +479,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
        adap->dev.of_node = pdev->dev.of_node;
        adap->quirks = of_device_get_match_data(&pdev->dev);
 
+       /*
+        * Disable the hardware clock stretching timeout. SMBUS
+        * specifies a limit for how long the device can stretch the
+        * clock, but core I2C doesn't.
+        */
+       bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0);
        bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
 
        ret = i2c_add_adapter(adap);
index 490ee39..b00f35c 100644 (file)
@@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
 
        /* set the data in/out register size for compatible SoCs */
        if (of_device_is_compatible(dev->device->of_node,
-                                   "brcmstb,brcmper-i2c"))
+                                   "brcm,brcmper-i2c"))
                dev->data_regsz = sizeof(u8);
        else
                dev->data_regsz = sizeof(u32);
index c1de8eb..cf54f1c 100644 (file)
@@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev)
                cci->master[idx].adap.quirks = &cci->data->quirks;
                cci->master[idx].adap.algo = &cci_algo;
                cci->master[idx].adap.dev.parent = dev;
-               cci->master[idx].adap.dev.of_node = child;
+               cci->master[idx].adap.dev.of_node = of_node_get(child);
                cci->master[idx].master = idx;
                cci->master[idx].cci = cci;
 
@@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev)
                        continue;
 
                ret = i2c_add_adapter(&cci->master[i].adap);
-               if (ret < 0)
+               if (ret < 0) {
+                       of_node_put(cci->master[i].adap.dev.of_node);
                        goto error_i2c;
+               }
        }
 
        pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
@@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev)
        return 0;
 
 error_i2c:
-       for (; i >= 0; i--) {
-               if (cci->master[i].cci)
+       for (--i ; i >= 0; i--) {
+               if (cci->master[i].cci) {
                        i2c_del_adapter(&cci->master[i].adap);
+                       of_node_put(cci->master[i].adap.dev.of_node);
+               }
        }
 error:
        disable_irq(cci->irq);
@@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev)
        int i;
 
        for (i = 0; i < cci->data->num_masters; i++) {
-               if (cci->master[i].cci)
+               if (cci->master[i].cci) {
                        i2c_del_adapter(&cci->master[i].adap);
+                       of_node_put(cci->master[i].adap.dev.of_node);
+               }
                cci_halt(cci, i);
        }
 
index e6081dd..d11f668 100644 (file)
@@ -1783,11 +1783,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
        ret = iio_device_register(indio_dev);
        if (ret < 0) {
                dev_err(dev, "Unable to register iio device\n");
-               goto err_trigger_unregister;
+               goto err_pm_cleanup;
        }
 
        return 0;
 
+err_pm_cleanup:
+       pm_runtime_dont_use_autosuspend(dev);
+       pm_runtime_disable(dev);
 err_trigger_unregister:
        bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
 err_buffer_cleanup:
index 32989d9..f7fd9e0 100644 (file)
@@ -173,12 +173,20 @@ struct fxls8962af_data {
        u16 upper_thres;
 };
 
-const struct regmap_config fxls8962af_regmap_conf = {
+const struct regmap_config fxls8962af_i2c_regmap_conf = {
        .reg_bits = 8,
        .val_bits = 8,
        .max_register = FXLS8962AF_MAX_REG,
 };
-EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf);
+EXPORT_SYMBOL_GPL(fxls8962af_i2c_regmap_conf);
+
+const struct regmap_config fxls8962af_spi_regmap_conf = {
+       .reg_bits = 8,
+       .pad_bits = 8,
+       .val_bits = 8,
+       .max_register = FXLS8962AF_MAX_REG,
+};
+EXPORT_SYMBOL_GPL(fxls8962af_spi_regmap_conf);
 
 enum {
        fxls8962af_idx_x,
index cfb004b..6bde989 100644 (file)
@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct i2c_client *client)
 {
        struct regmap *regmap;
 
-       regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf);
+       regmap = devm_regmap_init_i2c(client, &fxls8962af_i2c_regmap_conf);
        if (IS_ERR(regmap)) {
                dev_err(&client->dev, "Failed to initialize i2c regmap\n");
                return PTR_ERR(regmap);
index 57108d3..6f4dff3 100644 (file)
@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct spi_device *spi)
 {
        struct regmap *regmap;
 
-       regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf);
+       regmap = devm_regmap_init_spi(spi, &fxls8962af_spi_regmap_conf);
        if (IS_ERR(regmap)) {
                dev_err(&spi->dev, "Failed to initialize spi regmap\n");
                return PTR_ERR(regmap);
index b67572c..9cbe98c 100644 (file)
@@ -17,6 +17,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq);
 int fxls8962af_core_remove(struct device *dev);
 
 extern const struct dev_pm_ops fxls8962af_pm_ops;
-extern const struct regmap_config fxls8962af_regmap_conf;
+extern const struct regmap_config fxls8962af_i2c_regmap_conf;
+extern const struct regmap_config fxls8962af_spi_regmap_conf;
 
 #endif                         /* _FXLS8962AF_H_ */
index 0fe5703..ac74cdc 100644 (file)
@@ -1590,11 +1590,14 @@ static int kxcjk1013_probe(struct i2c_client *client,
        ret = iio_device_register(indio_dev);
        if (ret < 0) {
                dev_err(&client->dev, "unable to register iio device\n");
-               goto err_buffer_cleanup;
+               goto err_pm_cleanup;
        }
 
        return 0;
 
+err_pm_cleanup:
+       pm_runtime_dont_use_autosuspend(&client->dev);
+       pm_runtime_disable(&client->dev);
 err_buffer_cleanup:
        iio_triggered_buffer_cleanup(indio_dev);
 err_trigger_unregister:
index 4c359fb..c53a339 100644 (file)
@@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client,
        ret = iio_device_register(indio_dev);
        if (ret < 0) {
                dev_err(&client->dev, "unable to register iio device\n");
-               goto out_poweroff;
+               goto err_pm_cleanup;
        }
 
        return 0;
 
+err_pm_cleanup:
+       pm_runtime_dont_use_autosuspend(&client->dev);
+       pm_runtime_disable(&client->dev);
 out_poweroff:
        mma9551_set_device_state(client, false);
 
index 0570ab1..5ff6bc7 100644 (file)
@@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client,
        ret = iio_device_register(indio_dev);
        if (ret < 0) {
                dev_err(&client->dev, "unable to register iio device\n");
-               goto out_poweroff;
+               goto err_pm_cleanup;
        }
 
        dev_dbg(&indio_dev->dev, "Registered device %s\n", name);
        return 0;
 
+err_pm_cleanup:
+       pm_runtime_dont_use_autosuspend(&client->dev);
+       pm_runtime_disable(&client->dev);
 out_poweroff:
        mma9551_set_device_state(client, false);
        return ret;
index bc2cfa5..b400bbe 100644 (file)
@@ -76,7 +76,7 @@
 #define AD7124_CONFIG_REF_SEL(x)       FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x)
 #define AD7124_CONFIG_PGA_MSK          GENMASK(2, 0)
 #define AD7124_CONFIG_PGA(x)           FIELD_PREP(AD7124_CONFIG_PGA_MSK, x)
-#define AD7124_CONFIG_IN_BUFF_MSK      GENMASK(7, 6)
+#define AD7124_CONFIG_IN_BUFF_MSK      GENMASK(6, 5)
 #define AD7124_CONFIG_IN_BUFF(x)       FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x)
 
 /* AD7124_FILTER_X */
index 42ea8bc..adc5cea 100644 (file)
@@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev,
        struct z188_adc *adc;
        struct iio_dev *indio_dev;
        struct resource *mem;
+       int ret;
 
        indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
        if (!indio_dev)
@@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev,
        adc->mem = mem;
        mcb_set_drvdata(dev, indio_dev);
 
-       return iio_device_register(indio_dev);
+       ret = iio_device_register(indio_dev);
+       if (ret)
+               goto err_unmap;
+
+       return 0;
 
+err_unmap:
+       iounmap(adc->base);
 err:
        mcb_release_mem(mem);
        return -ENXIO;
index d84ae6b..e8fc4d0 100644 (file)
@@ -388,7 +388,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev,
        mutex_lock(&priv->slock);
 
        size = 0;
-       for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) {
+       for_each_set_bit(ch_idx, active_scan_mask, ARRAY_SIZE(priv->l)) {
                size += tsc2046_adc_group_set_layout(priv, group, ch_idx);
                tsc2046_adc_group_set_cmd(priv, group, ch_idx);
                group++;
@@ -548,7 +548,7 @@ static int tsc2046_adc_setup_spi_msg(struct tsc2046_adc_priv *priv)
         * enabled.
         */
        size = 0;
-       for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++)
+       for (ch_idx = 0; ch_idx < ARRAY_SIZE(priv->l); ch_idx++)
                size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx);
 
        priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL);
index 5271073..acd230a 100644 (file)
@@ -134,7 +134,6 @@ struct ad74413r_state {
 #define AD74413R_CH_EN_MASK(x)         BIT(x)
 
 #define AD74413R_REG_DIN_COMP_OUT              0x25
-#define AD74413R_DIN_COMP_OUT_SHIFT_X(x)       x
 
 #define AD74413R_REG_ADC_RESULT_X(x)   (0x26 + (x))
 #define AD74413R_ADC_RESULT_MAX                GENMASK(15, 0)
@@ -288,7 +287,7 @@ static void ad74413r_gpio_set_multiple(struct gpio_chip *chip,
        unsigned int offset = 0;
        int ret;
 
-       for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+       for_each_set_bit_from(offset, mask, chip->ngpio) {
                unsigned int real_offset = st->gpo_gpio_offsets[offset];
 
                ret = ad74413r_set_gpo_config(st, real_offset,
@@ -316,7 +315,7 @@ static int ad74413r_gpio_get(struct gpio_chip *chip, unsigned int offset)
        if (ret)
                return ret;
 
-       status &= AD74413R_DIN_COMP_OUT_SHIFT_X(real_offset);
+       status &= BIT(real_offset);
 
        return status ? 1 : 0;
 }
@@ -334,11 +333,10 @@ static int ad74413r_gpio_get_multiple(struct gpio_chip *chip,
        if (ret)
                return ret;
 
-       for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+       for_each_set_bit_from(offset, mask, chip->ngpio) {
                unsigned int real_offset = st->comp_gpio_offsets[offset];
 
-               if (val & BIT(real_offset))
-                       *bits |= offset;
+               __assign_bit(offset, bits, val & BIT(real_offset));
        }
 
        return ret;
@@ -840,7 +838,7 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev,
 {
        struct ad74413r_state *st = iio_priv(indio_dev);
        struct spi_transfer *xfer = st->adc_samples_xfer;
-       u8 *rx_buf = &st->adc_samples_buf.rx_buf[-1 * AD74413R_FRAME_SIZE];
+       u8 *rx_buf = st->adc_samples_buf.rx_buf;
        u8 *tx_buf = st->adc_samples_tx_buf;
        unsigned int channel;
        int ret = -EINVAL;
@@ -894,9 +892,10 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev,
 
                spi_message_add_tail(xfer, &st->adc_samples_msg);
 
-               xfer++;
                tx_buf += AD74413R_FRAME_SIZE;
-               rx_buf += AD74413R_FRAME_SIZE;
+               if (xfer != st->adc_samples_xfer)
+                       rx_buf += AD74413R_FRAME_SIZE;
+               xfer++;
        }
 
        xfer->rx_buf = rx_buf;
index 6cdeb50..3f3c478 100644 (file)
@@ -348,7 +348,7 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
 
        vcm = regulator_get_voltage(st->reg);
 
-       if (vcm >= 0 && vcm < 1800000)
+       if (vcm < 1800000)
                mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
        else if (vcm > 1800000 && vcm < 2600000)
                mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
index 17b939a..81a6d09 100644 (file)
@@ -1188,11 +1188,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
        ret = iio_device_register(indio_dev);
        if (ret < 0) {
                dev_err(dev, "unable to register iio device\n");
-               goto err_buffer_cleanup;
+               goto err_pm_cleanup;
        }
 
        return 0;
 
+err_pm_cleanup:
+       pm_runtime_dont_use_autosuspend(dev);
+       pm_runtime_disable(dev);
 err_buffer_cleanup:
        iio_triggered_buffer_cleanup(indio_dev);
 err_trigger_unregister:
index ed12932..f9b4540 100644 (file)
@@ -1403,6 +1403,7 @@ static int adis16480_probe(struct spi_device *spi)
 {
        const struct spi_device_id *id = spi_get_device_id(spi);
        const struct adis_data *adis16480_data;
+       irq_handler_t trigger_handler = NULL;
        struct iio_dev *indio_dev;
        struct adis16480 *st;
        int ret;
@@ -1474,8 +1475,12 @@ static int adis16480_probe(struct spi_device *spi)
                st->clk_freq = st->chip_info->int_clk;
        }
 
+       /* Only use our trigger handler if burst mode is supported */
+       if (adis16480_data->burst_len)
+               trigger_handler = adis16480_trigger_handler;
+
        ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
-                                                adis16480_trigger_handler);
+                                                trigger_handler);
        if (ret)
                return ret;
 
index 1dabfd6..f897244 100644 (file)
@@ -1385,7 +1385,7 @@ static int kmx61_probe(struct i2c_client *client,
        ret = iio_device_register(data->acc_indio_dev);
        if (ret < 0) {
                dev_err(&client->dev, "Failed to register acc iio device\n");
-               goto err_buffer_cleanup_mag;
+               goto err_pm_cleanup;
        }
 
        ret = iio_device_register(data->mag_indio_dev);
@@ -1398,6 +1398,9 @@ static int kmx61_probe(struct i2c_client *client,
 
 err_iio_unregister_acc:
        iio_device_unregister(data->acc_indio_dev);
+err_pm_cleanup:
+       pm_runtime_dont_use_autosuspend(&client->dev);
+       pm_runtime_disable(&client->dev);
 err_buffer_cleanup_mag:
        if (client->irq > 0)
                iio_triggered_buffer_cleanup(data->mag_indio_dev);
index 727b4b6..93f0c6b 100644 (file)
@@ -1374,8 +1374,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
        if (err < 0)
                return err;
 
+       /*
+        * we need to wait for sensor settling time before
+        * reading data in order to avoid corrupted samples
+        */
        delay = 1000000000 / sensor->odr;
-       usleep_range(delay, 2 * delay);
+       usleep_range(3 * delay, 4 * delay);
 
        err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data));
        if (err < 0)
index 94eb9f6..208b519 100644 (file)
@@ -1569,9 +1569,17 @@ static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg
        }
 
        if (copy_to_user(ival, &fd, sizeof(fd))) {
-               put_unused_fd(fd);
-               ret = -EFAULT;
-               goto error_free_ib;
+               /*
+                * "Leak" the fd, as there's not much we can do about this
+                * anyway. 'fd' might have been closed already, as
+                * anon_inode_getfd() called fd_install() on it, which made
+                * it reachable by userland.
+                *
+                * Instead of allowing a malicious user to play tricks with
+                * us, rely on the process exit path to do any necessary
+                * cleanup, as in releasing the file, if still needed.
+                */
+               return -EFAULT;
        }
 
        return 0;
index f96f531..3d4d21f 100644 (file)
@@ -962,13 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
        ret = iio_device_register(indio_dev);
        if (ret < 0) {
                dev_err(dev, "unable to register iio device\n");
-               goto err_disable_runtime_pm;
+               goto err_pm_cleanup;
        }
 
        dev_dbg(dev, "Registered device %s\n", name);
        return 0;
 
-err_disable_runtime_pm:
+err_pm_cleanup:
+       pm_runtime_dont_use_autosuspend(dev);
        pm_runtime_disable(dev);
 err_buffer_cleanup:
        iio_triggered_buffer_cleanup(indio_dev);
index c447526..50c5340 100644 (file)
@@ -3370,22 +3370,30 @@ err:
 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                         const struct sockaddr *dst_addr)
 {
-       if (!src_addr || !src_addr->sa_family) {
-               src_addr = (struct sockaddr *) &id->route.addr.src_addr;
-               src_addr->sa_family = dst_addr->sa_family;
-               if (IS_ENABLED(CONFIG_IPV6) &&
-                   dst_addr->sa_family == AF_INET6) {
-                       struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
-                       struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
-                       src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
-                       if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-                               id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
-               } else if (dst_addr->sa_family == AF_IB) {
-                       ((struct sockaddr_ib *) src_addr)->sib_pkey =
-                               ((struct sockaddr_ib *) dst_addr)->sib_pkey;
-               }
-       }
-       return rdma_bind_addr(id, src_addr);
+       struct sockaddr_storage zero_sock = {};
+
+       if (src_addr && src_addr->sa_family)
+               return rdma_bind_addr(id, src_addr);
+
+       /*
+        * When the src_addr is not specified, automatically supply an any addr
+        */
+       zero_sock.ss_family = dst_addr->sa_family;
+       if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
+               struct sockaddr_in6 *src_addr6 =
+                       (struct sockaddr_in6 *)&zero_sock;
+               struct sockaddr_in6 *dst_addr6 =
+                       (struct sockaddr_in6 *)dst_addr;
+
+               src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+               if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+                       id->route.addr.dev_addr.bound_dev_if =
+                               dst_addr6->sin6_scope_id;
+       } else if (dst_addr->sa_family == AF_IB) {
+               ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
+                       ((struct sockaddr_ib *)dst_addr)->sib_pkey;
+       }
+       return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
 }
 
 /*
index 0a3b281..41c2729 100644 (file)
@@ -541,7 +541,7 @@ static struct attribute *port_diagc_attributes[] = {
 };
 
 static const struct attribute_group port_diagc_group = {
-       .name = "linkcontrol",
+       .name = "diag_counters",
        .attrs = port_diagc_attributes,
 };
 
index 7c3f98e..759b85f 100644 (file)
@@ -2682,6 +2682,8 @@ static void rtrs_clt_dev_release(struct device *dev)
        struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
                                                 dev);
 
+       mutex_destroy(&clt->paths_ev_mutex);
+       mutex_destroy(&clt->paths_mutex);
        kfree(clt);
 }
 
@@ -2711,6 +2713,8 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
                return ERR_PTR(-ENOMEM);
        }
 
+       clt->dev.class = rtrs_clt_dev_class;
+       clt->dev.release = rtrs_clt_dev_release;
        uuid_gen(&clt->paths_uuid);
        INIT_LIST_HEAD_RCU(&clt->paths_list);
        clt->paths_num = paths_num;
@@ -2727,53 +2731,51 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
        init_waitqueue_head(&clt->permits_wait);
        mutex_init(&clt->paths_ev_mutex);
        mutex_init(&clt->paths_mutex);
+       device_initialize(&clt->dev);
 
-       clt->dev.class = rtrs_clt_dev_class;
-       clt->dev.release = rtrs_clt_dev_release;
        err = dev_set_name(&clt->dev, "%s", sessname);
        if (err)
-               goto err;
+               goto err_put;
+
        /*
         * Suppress user space notification until
         * sysfs files are created
         */
        dev_set_uevent_suppress(&clt->dev, true);
-       err = device_register(&clt->dev);
-       if (err) {
-               put_device(&clt->dev);
-               goto err;
-       }
+       err = device_add(&clt->dev);
+       if (err)
+               goto err_put;
 
        clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
        if (!clt->kobj_paths) {
                err = -ENOMEM;
-               goto err_dev;
+               goto err_del;
        }
        err = rtrs_clt_create_sysfs_root_files(clt);
        if (err) {
                kobject_del(clt->kobj_paths);
                kobject_put(clt->kobj_paths);
-               goto err_dev;
+               goto err_del;
        }
        dev_set_uevent_suppress(&clt->dev, false);
        kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
 
        return clt;
-err_dev:
-       device_unregister(&clt->dev);
-err:
+err_del:
+       device_del(&clt->dev);
+err_put:
        free_percpu(clt->pcpu_path);
-       kfree(clt);
+       put_device(&clt->dev);
        return ERR_PTR(err);
 }
 
 static void free_clt(struct rtrs_clt_sess *clt)
 {
-       free_permits(clt);
        free_percpu(clt->pcpu_path);
-       mutex_destroy(&clt->paths_ev_mutex);
-       mutex_destroy(&clt->paths_mutex);
-       /* release callback will free clt in last put */
+
+       /*
+        * release callback will free clt and destroy mutexes in last put
+        */
        device_unregister(&clt->dev);
 }
 
@@ -2890,6 +2892,7 @@ void rtrs_clt_close(struct rtrs_clt_sess *clt)
                rtrs_clt_destroy_path_files(clt_path, NULL);
                kobject_put(&clt_path->kobj);
        }
+       free_permits(clt);
        free_clt(clt);
 }
 EXPORT_SYMBOL(rtrs_clt_close);
index e174e85..285b766 100644 (file)
@@ -4047,9 +4047,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
                spin_unlock(&host->target_lock);
 
                /*
-                * Wait for tl_err and target port removal tasks.
+                * srp_queue_remove_work() queues a call to
+                * srp_remove_target(). The latter function cancels
+                * target->tl_err_work so waiting for the remove works to
+                * finish is sufficient.
                 */
-               flush_workqueue(system_long_wq);
                flush_workqueue(srp_remove_wq);
 
                kfree(host);
index ccaeb24..c3139bc 100644 (file)
@@ -2285,6 +2285,12 @@ int input_register_device(struct input_dev *dev)
        /* KEY_RESERVED is not supposed to be transmitted to userspace. */
        __clear_bit(KEY_RESERVED, dev->keybit);
 
+       /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
+       if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
+               __clear_bit(BTN_RIGHT, dev->keybit);
+               __clear_bit(BTN_MIDDLE, dev->keybit);
+       }
+
        /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
        input_cleanse_bitmasks(dev);
 
index 0c607da..9417ee0 100644 (file)
@@ -556,7 +556,7 @@ config KEYBOARD_PMIC8XXX
 
 config KEYBOARD_SAMSUNG
        tristate "Samsung keypad support"
-       depends on HAVE_CLK
+       depends on HAS_IOMEM && HAVE_CLK
        select INPUT_MATRIXKMAP
        help
          Say Y here if you want to use the keypad on your Samsung mobile
index 47af62c..e1758d5 100644 (file)
@@ -186,55 +186,21 @@ static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count,
        return 0;
 }
 
-static int elan_enable_power(struct elan_tp_data *data)
+static int elan_set_power(struct elan_tp_data *data, bool on)
 {
        int repeat = ETP_RETRY_COUNT;
        int error;
 
-       error = regulator_enable(data->vcc);
-       if (error) {
-               dev_err(&data->client->dev,
-                       "failed to enable regulator: %d\n", error);
-               return error;
-       }
-
        do {
-               error = data->ops->power_control(data->client, true);
+               error = data->ops->power_control(data->client, on);
                if (error >= 0)
                        return 0;
 
                msleep(30);
        } while (--repeat > 0);
 
-       dev_err(&data->client->dev, "failed to enable power: %d\n", error);
-       return error;
-}
-
-static int elan_disable_power(struct elan_tp_data *data)
-{
-       int repeat = ETP_RETRY_COUNT;
-       int error;
-
-       do {
-               error = data->ops->power_control(data->client, false);
-               if (!error) {
-                       error = regulator_disable(data->vcc);
-                       if (error) {
-                               dev_err(&data->client->dev,
-                                       "failed to disable regulator: %d\n",
-                                       error);
-                               /* Attempt to power the chip back up */
-                               data->ops->power_control(data->client, true);
-                               break;
-                       }
-
-                       return 0;
-               }
-
-               msleep(30);
-       } while (--repeat > 0);
-
-       dev_err(&data->client->dev, "failed to disable power: %d\n", error);
+       dev_err(&data->client->dev, "failed to set power %s: %d\n",
+               on ? "on" : "off", error);
        return error;
 }
 
@@ -1399,9 +1365,19 @@ static int __maybe_unused elan_suspend(struct device *dev)
                /* Enable wake from IRQ */
                data->irq_wake = (enable_irq_wake(client->irq) == 0);
        } else {
-               ret = elan_disable_power(data);
+               ret = elan_set_power(data, false);
+               if (ret)
+                       goto err;
+
+               ret = regulator_disable(data->vcc);
+               if (ret) {
+                       dev_err(dev, "error %d disabling regulator\n", ret);
+                       /* Attempt to power the chip back up */
+                       elan_set_power(data, true);
+               }
        }
 
+err:
        mutex_unlock(&data->sysfs_mutex);
        return ret;
 }
@@ -1412,12 +1388,18 @@ static int __maybe_unused elan_resume(struct device *dev)
        struct elan_tp_data *data = i2c_get_clientdata(client);
        int error;
 
-       if (device_may_wakeup(dev) && data->irq_wake) {
+       if (!device_may_wakeup(dev)) {
+               error = regulator_enable(data->vcc);
+               if (error) {
+                       dev_err(dev, "error %d enabling regulator\n", error);
+                       goto err;
+               }
+       } else if (data->irq_wake) {
                disable_irq_wake(client->irq);
                data->irq_wake = false;
        }
 
-       error = elan_enable_power(data);
+       error = elan_set_power(data, true);
        if (error) {
                dev_err(dev, "power up when resuming failed: %d\n", error);
                goto err;
index a472489..164f6c7 100644 (file)
@@ -75,6 +75,8 @@ static void psmouse_smbus_detach_i2c_client(struct i2c_client *client)
                                    "Marking SMBus companion %s as gone\n",
                                    dev_name(&smbdev->client->dev));
                        smbdev->dead = true;
+                       device_link_remove(&smbdev->client->dev,
+                                          &smbdev->psmouse->ps2dev.serio->dev);
                        serio_rescan(smbdev->psmouse->ps2dev.serio);
                } else {
                        list_del(&smbdev->node);
@@ -174,6 +176,8 @@ static void psmouse_smbus_disconnect(struct psmouse *psmouse)
                kfree(smbdev);
        } else {
                smbdev->dead = true;
+               device_link_remove(&smbdev->client->dev,
+                                  &psmouse->ps2dev.serio->dev);
                psmouse_dbg(smbdev->psmouse,
                            "posting removal request for SMBus companion %s\n",
                            dev_name(&smbdev->client->dev));
@@ -270,6 +274,12 @@ int psmouse_smbus_init(struct psmouse *psmouse,
 
        if (smbdev->client) {
                /* We have our companion device */
+               if (!device_link_add(&smbdev->client->dev,
+                                    &psmouse->ps2dev.serio->dev,
+                                    DL_FLAG_STATELESS))
+                       psmouse_warn(psmouse,
+                                    "failed to set up link with iSMBus companion %s\n",
+                                    dev_name(&smbdev->client->dev));
                return 0;
        }
 
index a3bfc7a..752e8ba 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/platform_data/x86/soc.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <linux/of.h>
@@ -805,21 +806,6 @@ static int goodix_reset(struct goodix_ts_data *ts)
 }
 
 #ifdef ACPI_GPIO_SUPPORT
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
-
-static const struct x86_cpu_id baytrail_cpu_ids[] = {
-       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, X86_FEATURE_ANY, },
-       {}
-};
-
-static inline bool is_byt(void)
-{
-       const struct x86_cpu_id *id = x86_match_cpu(baytrail_cpu_ids);
-
-       return !!id;
-}
-
 static const struct acpi_gpio_params first_gpio = { 0, 0, false };
 static const struct acpi_gpio_params second_gpio = { 1, 0, false };
 
@@ -878,7 +864,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
        const struct acpi_gpio_mapping *gpio_mapping = NULL;
        struct device *dev = &ts->client->dev;
        LIST_HEAD(resources);
-       int ret;
+       int irq, ret;
 
        ts->gpio_count = 0;
        ts->gpio_int_idx = -1;
@@ -891,6 +877,20 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
 
        acpi_dev_free_resource_list(&resources);
 
+       /*
+        * CHT devices should have a GpioInt + a regular GPIO ACPI resource.
+        * Some CHT devices have a bug (where the also is bogus Interrupt
+        * resource copied from a previous BYT based generation). i2c-core-acpi
+        * will use the non-working Interrupt resource, fix this up.
+        */
+       if (soc_intel_is_cht() && ts->gpio_count == 2 && ts->gpio_int_idx != -1) {
+               irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
+               if (irq > 0 && irq != ts->client->irq) {
+                       dev_warn(dev, "Overriding IRQ %d -> %d\n", ts->client->irq, irq);
+                       ts->client->irq = irq;
+               }
+       }
+
        if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
                ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
                gpio_mapping = acpi_goodix_int_first_gpios;
@@ -903,7 +903,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
                dev_info(dev, "Using ACPI INTI and INTO methods for IRQ pin access\n");
                ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_METHOD;
                gpio_mapping = acpi_goodix_reset_only_gpios;
-       } else if (is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
+       } else if (soc_intel_is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
                dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
                ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
                gpio_mapping = acpi_goodix_int_last_gpios;
index 7c82c4f..129ebc8 100644 (file)
@@ -571,8 +571,20 @@ static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume);
 
 #ifdef CONFIG_OF
 static const struct of_device_id zinitix_of_match[] = {
+       { .compatible = "zinitix,bt402" },
+       { .compatible = "zinitix,bt403" },
+       { .compatible = "zinitix,bt404" },
+       { .compatible = "zinitix,bt412" },
+       { .compatible = "zinitix,bt413" },
+       { .compatible = "zinitix,bt431" },
+       { .compatible = "zinitix,bt432" },
+       { .compatible = "zinitix,bt531" },
        { .compatible = "zinitix,bt532" },
+       { .compatible = "zinitix,bt538" },
        { .compatible = "zinitix,bt541" },
+       { .compatible = "zinitix,bt548" },
+       { .compatible = "zinitix,bt554" },
+       { .compatible = "zinitix,at100" },
        { }
 };
 MODULE_DEVICE_TABLE(of, zinitix_of_match);
index 416815a..bb95edf 100644 (file)
@@ -14,6 +14,7 @@
 extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
 extern int amd_iommu_init_devices(void);
 extern void amd_iommu_uninit_devices(void);
index ffc89c4..47108ed 100644 (file)
 #define PASID_MASK             0x0000ffff
 
 /* MMIO status bits */
+#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK      (1 << 0)
 #define MMIO_STATUS_EVT_INT_MASK       (1 << 1)
 #define MMIO_STATUS_COM_WAIT_INT_MASK  (1 << 2)
 #define MMIO_STATUS_PPR_INT_MASK       (1 << 6)
index b10fb52..7bfe37e 100644 (file)
@@ -657,6 +657,16 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
        return iommu->cmd_buf ? 0 : -ENOMEM;
 }
 
+/*
+ * This function restarts event logging in case the IOMMU experienced
+ * an event log buffer overflow.
+ */
+void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
+{
+       iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+       iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+}
+
 /*
  * This function resets the command buffer if the IOMMU stopped fetching
  * commands from it.
index b1bf412..6608d17 100644 (file)
@@ -492,18 +492,18 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 
        dom = container_of(pgtable, struct protection_domain, iop);
 
-       /* Update data structure */
-       amd_iommu_domain_clr_pt_root(dom);
-
-       /* Make changes visible to IOMMUs */
-       amd_iommu_domain_update(dom);
-
        /* Page-table is not visible to IOMMU anymore, so free it */
        BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
               pgtable->mode > PAGE_MODE_6_LEVEL);
 
        free_sub_pt(pgtable->root, pgtable->mode, &freelist);
 
+       /* Update data structure */
+       amd_iommu_domain_clr_pt_root(dom);
+
+       /* Make changes visible to IOMMUs */
+       amd_iommu_domain_update(dom);
+
        put_pages_list(&freelist);
 }
 
index 461f184..a18b549 100644 (file)
@@ -764,7 +764,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
 #endif /* !CONFIG_IRQ_REMAP */
 
 #define AMD_IOMMU_INT_MASK     \
-       (MMIO_STATUS_EVT_INT_MASK | \
+       (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
+        MMIO_STATUS_EVT_INT_MASK | \
         MMIO_STATUS_PPR_INT_MASK | \
         MMIO_STATUS_GALOG_INT_MASK)
 
@@ -774,7 +775,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
        u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 
        while (status & AMD_IOMMU_INT_MASK) {
-               /* Enable EVT and PPR and GA interrupts again */
+               /* Enable interrupt sources again */
                writel(AMD_IOMMU_INT_MASK,
                        iommu->mmio_base + MMIO_STATUS_OFFSET);
 
@@ -795,6 +796,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
                }
 #endif
 
+               if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
+                       pr_info_ratelimited("IOMMU event log overflow\n");
+                       amd_iommu_restart_event_logging(iommu);
+               }
+
                /*
                 * Hardware bug: ERBT1312
                 * When re-enabling interrupt (by writing 1
index 92fea3f..5b196cf 100644 (file)
@@ -2738,7 +2738,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
        /* PASID table is mandatory for a PCI device in scalable mode. */
-       if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
+       if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
                ret = intel_pasid_alloc_table(dev);
                if (ret) {
                        dev_err(dev, "PASID table allocation failed\n");
index e900e3c..2561ce8 100644 (file)
@@ -808,8 +808,10 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
                return NULL;
 
        mc = platform_get_drvdata(pdev);
-       if (!mc)
+       if (!mc) {
+               put_device(&pdev->dev);
                return NULL;
+       }
 
        return mc->smmu;
 }
index 9e93ff2..cd77297 100644 (file)
@@ -5517,6 +5517,9 @@ int __init its_lpi_memreserve_init(void)
        if (!efi_enabled(EFI_CONFIG_TABLES))
                return 0;
 
+       if (list_empty(&its_nodes))
+               return 0;
+
        gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
        state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                  "irqchip/arm/gicv3/memreserve:online",
index 259065d..09cc982 100644 (file)
@@ -398,3 +398,4 @@ out_free_priv:
 
 IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
 IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
+IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
index dcbd6d2..997ace4 100644 (file)
@@ -2077,7 +2077,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
        set_bit(DMF_FREEING, &md->flags);
        spin_unlock(&_minor_lock);
 
-       blk_set_queue_dying(md->queue);
+       blk_mark_disk_dead(md->disk);
 
        /*
         * Take suspend_lock so that presuspend and postsuspend methods
index bb9c451..9fbfe78 100644 (file)
@@ -114,6 +114,9 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
        if (offset + count > EE1004_PAGE_SIZE)
                count = EE1004_PAGE_SIZE - offset;
 
+       if (count > I2C_SMBUS_BLOCK_MAX)
+               count = I2C_SMBUS_BLOCK_MAX;
+
        return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf);
 }
 
index 4ccbf43..aa1682b 100644 (file)
@@ -1288,7 +1288,14 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
        }
 
        if (copy_to_user(argp, &bp, sizeof(bp))) {
-               dma_buf_put(buf->dmabuf);
+               /*
+                * The usercopy failed, but we can't do much about it, as
+                * dma_buf_fd() already called fd_install() and made the
+                * file descriptor accessible for the current process. It
+                * might already be closed and dmabuf no longer valid when
+                * we reach this point. Therefore "leak" the fd and rely on
+                * the process exit path to do any required cleanup.
+                */
                return -EFAULT;
        }
 
index 4e61b28..8d718aa 100644 (file)
@@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;
        blk_status_t error = BLK_STS_OK;
-       int retries = 0;
 
        do {
                u32 status;
                int err;
+               int retries = 0;
 
-               mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+               while (retries++ <= MMC_READ_SINGLE_RETRIES) {
+                       mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
 
-               mmc_wait_for_req(host, mrq);
+                       mmc_wait_for_req(host, mrq);
 
-               err = mmc_send_status(card, &status);
-               if (err)
-                       goto error_exit;
-
-               if (!mmc_host_is_spi(host) &&
-                   !mmc_ready_for_data(status)) {
-                       err = mmc_blk_fix_state(card, req);
+                       err = mmc_send_status(card, &status);
                        if (err)
                                goto error_exit;
-               }
 
-               if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
-                       continue;
+                       if (!mmc_host_is_spi(host) &&
+                           !mmc_ready_for_data(status)) {
+                               err = mmc_blk_fix_state(card, req);
+                               if (err)
+                                       goto error_exit;
+                       }
 
-               retries = 0;
+                       if (!mrq->cmd->error)
+                               break;
+               }
 
                if (mrq->cmd->error ||
                    mrq->data->error ||
index 45f5787..bd87012 100644 (file)
@@ -67,7 +67,7 @@ static const unsigned int sd_au_size[] = {
                __res & __mask;                                         \
        })
 
-#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000
+#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000
 #define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
 
 struct sd_busy_data {
@@ -1664,6 +1664,12 @@ static int sd_poweroff_notify(struct mmc_card *card)
                goto out;
        }
 
+       /* Find out when the command is completed. */
+       err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+                               MMC_BUSY_EXTR_SINGLE);
+       if (err)
+               goto out;
+
        cb_data.card = card;
        cb_data.reg_buf = reg_buf;
        err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
index 16d1c7a..b6eb75f 100644 (file)
@@ -705,12 +705,12 @@ static int moxart_remove(struct platform_device *pdev)
        if (!IS_ERR_OR_NULL(host->dma_chan_rx))
                dma_release_channel(host->dma_chan_rx);
        mmc_remove_host(mmc);
-       mmc_free_host(mmc);
 
        writel(0, host->base + REG_INTERRUPT_MASK);
        writel(0, host->base + REG_POWER_CONTROL);
        writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
               host->base + REG_CLOCK_CONTROL);
+       mmc_free_host(mmc);
 
        return 0;
 }
index a593b1f..0f3658b 100644 (file)
@@ -524,12 +524,16 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
 
 static int esdhc_of_enable_dma(struct sdhci_host *host)
 {
+       int ret;
        u32 value;
        struct device *dev = mmc_dev(host->mmc);
 
        if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
-           of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
-               dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+           of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
+               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+               if (ret)
+                       return ret;
+       }
 
        value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
 
index bcc595c..104dcd7 100644 (file)
@@ -405,6 +405,9 @@ static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
        struct dma_slave_config cfg = { 0, };
 
        res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
+       if (!res)
+               return -EINVAL;
+
        cfg.direction = direction;
 
        if (direction == DMA_DEV_TO_MEM) {
index 6ed6c51..d503821 100644 (file)
@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
                }
        }
 
-       if (erasesize)
-               div_u64_rem(len, (uint32_t)erasesize, &rem);
-
        if (len == 0 || erasesize == 0 || erasesize > len
-           || erasesize > UINT_MAX || rem) {
+           || erasesize > UINT_MAX) {
                parse_err("illegal erasesize or len\n");
                ret = -EINVAL;
                goto error;
        }
 
+       div_u64_rem(len, (uint32_t)erasesize, &rem);
+       if (rem) {
+               parse_err("len is not multiple of erasesize\n");
+               ret = -EINVAL;
+               goto error;
+       }
+
        ret = register_device(name, start, len, (uint32_t)erasesize);
        if (ret)
                goto error;
index 70f492d..eef87b2 100644 (file)
@@ -546,6 +546,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
        config.stride = 1;
        config.read_only = true;
        config.root_only = true;
+       config.ignore_wp = true;
        config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
        config.priv = mtd;
 
@@ -833,6 +834,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
        config.owner = THIS_MODULE;
        config.type = NVMEM_TYPE_OTP;
        config.root_only = true;
+       config.ignore_wp = true;
        config.reg_read = reg_read;
        config.size = size;
        config.of_node = np;
index 20408b7..820e5dc 100644 (file)
@@ -42,7 +42,7 @@ config MTD_NAND_OMAP2
        tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
        depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
        depends on HAS_IOMEM
-       select OMAP_GPMC if ARCH_K3
+       depends on OMAP_GPMC
        help
          Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
          and Keystone platforms.
index f759297..aee78f5 100644 (file)
@@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
                                        mtd->oobsize / trans,
                                        host->hwcfg.sector_size_1k);
 
-               if (!ret) {
+               if (ret != -EBADMSG) {
                        *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
 
                        if (*err_addr)
index 1b64c5a..ded4df4 100644 (file)
@@ -2285,7 +2285,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
                this->hw.must_apply_timings = false;
                ret = gpmi_nfc_apply_timings(this);
                if (ret)
-                       return ret;
+                       goto out_pm;
        }
 
        dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
@@ -2414,6 +2414,7 @@ unmap:
 
        this->bch = false;
 
+out_pm:
        pm_runtime_mark_last_busy(this->dev);
        pm_runtime_put_autosuspend(this->dev);
 
index efe0ffe..9054559 100644 (file)
@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
        struct ingenic_ecc *ecc;
 
        pdev = of_find_device_by_node(np);
-       if (!pdev || !platform_get_drvdata(pdev))
+       if (!pdev)
                return ERR_PTR(-EPROBE_DEFER);
 
+       if (!platform_get_drvdata(pdev)) {
+               put_device(&pdev->dev);
+               return ERR_PTR(-EPROBE_DEFER);
+       }
+
        ecc = platform_get_drvdata(pdev);
        clk_prepare_enable(ecc->clk);
 
index 7c6efa3..1a77542 100644 (file)
@@ -2,7 +2,6 @@
 /*
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  */
-
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -3073,10 +3072,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
        if (dma_mapping_error(dev, nandc->base_dma))
                return -ENXIO;
 
-       ret = qcom_nandc_alloc(nandc);
-       if (ret)
-               goto err_nandc_alloc;
-
        ret = clk_prepare_enable(nandc->core_clk);
        if (ret)
                goto err_core_clk;
@@ -3085,6 +3080,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
        if (ret)
                goto err_aon_clk;
 
+       ret = qcom_nandc_alloc(nandc);
+       if (ret)
+               goto err_nandc_alloc;
+
        ret = qcom_nandc_setup(nandc);
        if (ret)
                goto err_setup;
@@ -3096,15 +3095,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
        return 0;
 
 err_setup:
+       qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
        clk_disable_unprepare(nandc->aon_clk);
 err_aon_clk:
        clk_disable_unprepare(nandc->core_clk);
 err_core_clk:
-       qcom_nandc_unalloc(nandc);
-err_nandc_alloc:
        dma_unmap_resource(dev, res->start, resource_size(res),
                           DMA_BIDIRECTIONAL, 0);
-
        return ret;
 }
 
index 06a818c..4311b89 100644 (file)
@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
                               const struct mtd_partition **pparts,
                               struct mtd_part_parser_data *data)
 {
+       size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
+       int ret, i, j, tmpparts, numparts = 0;
        struct smem_flash_pentry *pentry;
        struct smem_flash_ptable *ptable;
-       size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
        struct mtd_partition *parts;
-       int ret, i, numparts;
        char *name, *c;
 
        if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
@@ -75,7 +75,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
        pr_debug("Parsing partition table info from SMEM\n");
        ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
        if (IS_ERR(ptable)) {
-               pr_err("Error reading partition table header\n");
+               if (PTR_ERR(ptable) != -EPROBE_DEFER)
+                       pr_err("Error reading partition table header\n");
                return PTR_ERR(ptable);
        }
 
@@ -87,8 +88,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
        }
 
        /* Ensure that # of partitions is less than the max we have allocated */
-       numparts = le32_to_cpu(ptable->numparts);
-       if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
+       tmpparts = le32_to_cpu(ptable->numparts);
+       if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
                pr_err("Partition numbers exceed the max limit\n");
                return -EINVAL;
        }
@@ -116,11 +117,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
                return PTR_ERR(ptable);
        }
 
+       for (i = 0; i < tmpparts; i++) {
+               pentry = &ptable->pentry[i];
+               if (pentry->name[0] != '\0')
+                       numparts++;
+       }
+
        parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
        if (!parts)
                return -ENOMEM;
 
-       for (i = 0; i < numparts; i++) {
+       for (i = 0, j = 0; i < tmpparts; i++) {
                pentry = &ptable->pentry[i];
                if (pentry->name[0] == '\0')
                        continue;
@@ -135,24 +142,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
                for (c = name; *c != '\0'; c++)
                        *c = tolower(*c);
 
-               parts[i].name = name;
-               parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
-               parts[i].mask_flags = pentry->attr;
-               parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
+               parts[j].name = name;
+               parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
+               parts[j].mask_flags = pentry->attr;
+               parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
                pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
                         i, pentry->name, le32_to_cpu(pentry->offset),
                         le32_to_cpu(pentry->length), pentry->attr);
+               j++;
        }
 
        pr_debug("SMEM partition table found: ver: %d len: %d\n",
-                le32_to_cpu(ptable->version), numparts);
+                le32_to_cpu(ptable->version), tmpparts);
        *pparts = parts;
 
        return numparts;
 
 out_free_parts:
-       while (--i >= 0)
-               kfree(parts[i].name);
+       while (--j >= 0)
+               kfree(parts[j].name);
        kfree(parts);
        *pparts = NULL;
 
@@ -166,6 +174,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
 
        for (i = 0; i < nr_parts; i++)
                kfree(pparts[i].name);
+
+       kfree(pparts);
 }
 
 static const struct of_device_id qcomsmem_of_match_table[] = {
index 6382e19..c580acb 100644 (file)
@@ -138,6 +138,9 @@ static int com20020pci_probe(struct pci_dev *pdev,
                return -ENOMEM;
 
        ci = (struct com20020_pci_card_info *)id->driver_data;
+       if (!ci)
+               return -EINVAL;
+
        priv->ci = ci;
        mm = &ci->misc_map;
 
index 6006c2e..a86b1f7 100644 (file)
@@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port)
        if (bond == NULL)
                return 0;
 
-       return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+       return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
 }
 
 /**
@@ -1021,8 +1021,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
                                if (port->aggregator &&
                                    port->aggregator->is_active &&
                                    !__port_is_enabled(port)) {
-
                                        __enable_port(port);
+                                       *update_slave_arr = true;
                                }
                        }
                        break;
@@ -1779,6 +1779,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
                             port = port->next_port_in_aggregator) {
                                __enable_port(port);
                        }
+                       *update_slave_arr = true;
                }
        }
 
@@ -1994,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
  */
 void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
 {
-       BOND_AD_INFO(bond).agg_select_timer = timeout;
+       atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
 }
 
 /**
@@ -2277,6 +2278,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
        spin_unlock_bh(&bond->mode_lock);
 }
 
+/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond:  bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+       int val, nval;
+
+       while (1) {
+               val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+               if (!val)
+                       return false;
+               nval = val - 1;
+               if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+                                  val, nval) == val)
+                       break;
+       }
+       return nval == 0;
+}
+
 /**
  * bond_3ad_state_machine_handler - handle state machines timeout
  * @work: work context to fetch bonding struct to work on from
@@ -2312,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        if (!bond_has_slaves(bond))
                goto re_arm;
 
-       /* check if agg_select_timer timer after initialize is timed out */
-       if (BOND_AD_INFO(bond).agg_select_timer &&
-           !(--BOND_AD_INFO(bond).agg_select_timer)) {
+       if (bond_agg_timer_advance(bond)) {
                slave = bond_first_slave_rcu(bond);
                port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
 
index 238b56d..aebeb46 100644 (file)
@@ -2379,10 +2379,9 @@ static int __bond_release_one(struct net_device *bond_dev,
                bond_select_active_slave(bond);
        }
 
-       if (!bond_has_slaves(bond)) {
-               bond_set_carrier(bond);
+       bond_set_carrier(bond);
+       if (!bond_has_slaves(bond))
                eth_hw_addr_random(bond_dev);
-       }
 
        unblock_netpoll_tx();
        synchronize_rcu();
index b7dc1c3..acd7472 100644 (file)
@@ -1715,15 +1715,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
 
        netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
                       RCANFD_NAPI_WEIGHT);
+       spin_lock_init(&priv->tx_lock);
+       devm_can_led_init(ndev);
+       gpriv->ch[priv->channel] = priv;
        err = register_candev(ndev);
        if (err) {
                dev_err(&pdev->dev,
                        "register_candev() failed, error %d\n", err);
                goto fail_candev;
        }
-       spin_lock_init(&priv->tx_lock);
-       devm_can_led_init(ndev);
-       gpriv->ch[priv->channel] = priv;
        dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
        return 0;
 
index 2ed2370..2d73ebb 100644 (file)
@@ -1787,7 +1787,7 @@ static int es58x_open(struct net_device *netdev)
        struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
        int ret;
 
-       if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) {
+       if (!es58x_dev->opened_channel_cnt) {
                ret = es58x_alloc_rx_urbs(es58x_dev);
                if (ret)
                        return ret;
@@ -1805,12 +1805,13 @@ static int es58x_open(struct net_device *netdev)
        if (ret)
                goto free_urbs;
 
+       es58x_dev->opened_channel_cnt++;
        netif_start_queue(netdev);
 
        return ret;
 
  free_urbs:
-       if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+       if (!es58x_dev->opened_channel_cnt)
                es58x_free_urbs(es58x_dev);
        netdev_err(netdev, "%s: Could not open the network device: %pe\n",
                   __func__, ERR_PTR(ret));
@@ -1845,7 +1846,8 @@ static int es58x_stop(struct net_device *netdev)
 
        es58x_flush_pending_tx_msg(netdev);
 
-       if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+       es58x_dev->opened_channel_cnt--;
+       if (!es58x_dev->opened_channel_cnt)
                es58x_free_urbs(es58x_dev);
 
        return 0;
@@ -2215,7 +2217,6 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
        init_usb_anchor(&es58x_dev->tx_urbs_idle);
        init_usb_anchor(&es58x_dev->tx_urbs_busy);
        atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0);
-       atomic_set(&es58x_dev->opened_channel_cnt, 0);
        usb_set_intfdata(intf, es58x_dev);
 
        es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev,
index 826a158..e5033cb 100644 (file)
@@ -373,8 +373,6 @@ struct es58x_operators {
  *     queue wake/stop logic should prevent this URB from getting
  *     empty. Please refer to es58x_get_tx_urb() for more details.
  * @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle.
- * @opened_channel_cnt: number of channels opened (c.f. es58x_open()
- *     and es58x_stop()).
  * @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns()
  *     was called.
  * @realtime_diff_ns: difference in nanoseconds between the clocks of
@@ -384,6 +382,10 @@ struct es58x_operators {
  *     in RX branches.
  * @rx_max_packet_size: Maximum length of bulk-in URB.
  * @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
+ * @opened_channel_cnt: number of channels opened. Free of race
+ *     conditions because its two users (net_device_ops:ndo_open()
+ *     and net_device_ops:ndo_close()) guarantee that the network
+ *     stack big kernel lock (a.k.a. rtnl_mutex) is being hold.
  * @rx_cmd_buf_len: Length of @rx_cmd_buf.
  * @rx_cmd_buf: The device might split the URB commands in an
  *     arbitrary amount of pieces. This buffer is used to concatenate
@@ -406,7 +408,6 @@ struct es58x_device {
        struct usb_anchor tx_urbs_busy;
        struct usb_anchor tx_urbs_idle;
        atomic_t tx_urbs_idle_cnt;
-       atomic_t opened_channel_cnt;
 
        u64 ktime_req_ns;
        s64 realtime_diff_ns;
@@ -415,6 +416,7 @@ struct es58x_device {
 
        u16 rx_max_packet_size;
        u8 num_can_ch;
+       u8 opened_channel_cnt;
 
        u16 rx_cmd_buf_len;
        union es58x_urb_cmd rx_cmd_buf;
index b487e3f..d35749f 100644 (file)
@@ -191,8 +191,8 @@ struct gs_can {
 struct gs_usb {
        struct gs_can *canch[GS_MAX_INTF];
        struct usb_anchor rx_submitted;
-       atomic_t active_channels;
        struct usb_device *udev;
+       u8 active_channels;
 };
 
 /* 'allocate' a tx context.
@@ -589,7 +589,7 @@ static int gs_can_open(struct net_device *netdev)
        if (rc)
                return rc;
 
-       if (atomic_add_return(1, &parent->active_channels) == 1) {
+       if (!parent->active_channels) {
                for (i = 0; i < GS_MAX_RX_URBS; i++) {
                        struct urb *urb;
                        u8 *buf;
@@ -690,6 +690,7 @@ static int gs_can_open(struct net_device *netdev)
 
        dev->can.state = CAN_STATE_ERROR_ACTIVE;
 
+       parent->active_channels++;
        if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
                netif_start_queue(netdev);
 
@@ -705,7 +706,8 @@ static int gs_can_close(struct net_device *netdev)
        netif_stop_queue(netdev);
 
        /* Stop polling */
-       if (atomic_dec_and_test(&parent->active_channels))
+       parent->active_channels--;
+       if (!parent->active_channels)
                usb_kill_anchored_urbs(&parent->rx_submitted);
 
        /* Stop sending URBs */
@@ -984,8 +986,6 @@ static int gs_usb_probe(struct usb_interface *intf,
 
        init_usb_anchor(&dev->rx_submitted);
 
-       atomic_set(&dev->active_channels, 0);
-
        usb_set_intfdata(intf, dev);
        dev->udev = interface_to_usbdev(intf);
 
index c0c9144..0029d27 100644 (file)
@@ -82,6 +82,7 @@ config NET_DSA_REALTEK_SMI
 
 config NET_DSA_SMSC_LAN9303
        tristate
+       depends on VLAN_8021Q || VLAN_8021Q=n
        select NET_DSA_TAG_LAN9303
        select REGMAP
        help
index 33499fc..6afb5db 100644 (file)
@@ -621,7 +621,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
        get_device(&priv->master_mii_bus->dev);
        priv->master_mii_dn = dn;
 
-       priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+       priv->slave_mii_bus = mdiobus_alloc();
        if (!priv->slave_mii_bus) {
                of_node_put(dn);
                return -ENOMEM;
@@ -681,8 +681,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
        }
 
        err = mdiobus_register(priv->slave_mii_bus);
-       if (err && dn)
+       if (err && dn) {
+               mdiobus_free(priv->slave_mii_bus);
                of_node_put(dn);
+       }
 
        return err;
 }
@@ -690,6 +692,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
 static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
 {
        mdiobus_unregister(priv->slave_mii_bus);
+       mdiobus_free(priv->slave_mii_bus);
        of_node_put(priv->master_mii_dn);
 }
 
index d55784d..3969d89 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
 #include <linux/etherdevice.h>
 
 #include "lan9303.h"
@@ -1083,21 +1084,27 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
 static int lan9303_port_enable(struct dsa_switch *ds, int port,
                               struct phy_device *phy)
 {
+       struct dsa_port *dp = dsa_to_port(ds, port);
        struct lan9303 *chip = ds->priv;
 
-       if (!dsa_is_user_port(ds, port))
+       if (!dsa_port_is_user(dp))
                return 0;
 
+       vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
        return lan9303_enable_processing_port(chip, port);
 }
 
 static void lan9303_port_disable(struct dsa_switch *ds, int port)
 {
+       struct dsa_port *dp = dsa_to_port(ds, port);
        struct lan9303 *chip = ds->priv;
 
-       if (!dsa_is_user_port(ds, port))
+       if (!dsa_port_is_user(dp))
                return;
 
+       vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
        lan9303_disable_processing_port(chip, port);
        lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
 }
@@ -1310,7 +1317,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
                                     struct device_node *np)
 {
        chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
-                                                  GPIOD_OUT_LOW);
+                                                  GPIOD_OUT_HIGH);
        if (IS_ERR(chip->reset_gpio))
                return PTR_ERR(chip->reset_gpio);
 
index 46ed953..8a7a809 100644 (file)
@@ -498,8 +498,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
 static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
 {
        struct dsa_switch *ds = priv->ds;
+       int err;
 
-       ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+       ds->slave_mii_bus = mdiobus_alloc();
        if (!ds->slave_mii_bus)
                return -ENOMEM;
 
@@ -512,7 +513,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
        ds->slave_mii_bus->parent = priv->dev;
        ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
 
-       return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+       err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+       if (err)
+               mdiobus_free(ds->slave_mii_bus);
+
+       return err;
 }
 
 static int gswip_pce_table_entry_read(struct gswip_priv *priv,
@@ -2145,8 +2150,10 @@ disable_switch:
        gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
        dsa_unregister_switch(priv->ds);
 mdio_bus:
-       if (mdio_np)
+       if (mdio_np) {
                mdiobus_unregister(priv->ds->slave_mii_bus);
+               mdiobus_free(priv->ds->slave_mii_bus);
+       }
 put_mdio_node:
        of_node_put(mdio_np);
        for (i = 0; i < priv->num_gphy_fw; i++)
@@ -2170,6 +2177,7 @@ static int gswip_remove(struct platform_device *pdev)
        if (priv->ds->slave_mii_bus) {
                mdiobus_unregister(priv->ds->slave_mii_bus);
                of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+               mdiobus_free(priv->ds->slave_mii_bus);
        }
 
        for (i = 0; i < priv->num_gphy_fw; i++)
index 55dbda0..243f8ad 100644 (file)
@@ -26,7 +26,7 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
        struct dsa_switch *ds = dev->ds;
        u8 port_member = 0, cpu_port;
        const struct dsa_port *dp;
-       int i;
+       int i, j;
 
        if (!dsa_is_user_port(ds, port))
                return;
@@ -45,13 +45,33 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
                        continue;
                if (!dsa_port_bridge_same(dp, other_dp))
                        continue;
+               if (other_p->stp_state != BR_STATE_FORWARDING)
+                       continue;
 
-               if (other_p->stp_state == BR_STATE_FORWARDING &&
-                   p->stp_state == BR_STATE_FORWARDING) {
+               if (p->stp_state == BR_STATE_FORWARDING) {
                        val |= BIT(port);
                        port_member |= BIT(i);
                }
 
+               /* Retain port [i]'s relationship to other ports than [port] */
+               for (j = 0; j < ds->num_ports; j++) {
+                       const struct dsa_port *third_dp;
+                       struct ksz_port *third_p;
+
+                       if (j == i)
+                               continue;
+                       if (j == port)
+                               continue;
+                       if (!dsa_is_user_port(ds, j))
+                               continue;
+                       third_p = &dev->ports[j];
+                       if (third_p->stp_state != BR_STATE_FORWARDING)
+                               continue;
+                       third_dp = dsa_to_port(ds, j);
+                       if (dsa_port_bridge_same(other_dp, third_dp))
+                               val |= BIT(j);
+               }
+
                dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
        }
 
index b82512e..ff3c267 100644 (file)
@@ -2074,7 +2074,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
        if (priv->irq)
                mt7530_setup_mdio_irq(priv);
 
-       ret = mdiobus_register(bus);
+       ret = devm_mdiobus_register(dev, bus);
        if (ret) {
                dev_err(dev, "failed to register MDIO bus: %d\n", ret);
                if (priv->irq)
index 58ca684..ab16765 100644 (file)
@@ -2284,6 +2284,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
        if (!mv88e6xxx_max_vid(chip))
                return -EOPNOTSUPP;
 
+       /* The ATU removal procedure needs the FID to be mapped in the VTU,
+        * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+        * switchdev workqueue to ensure that all FDB entries are deleted
+        * before we remove the VLAN.
+        */
+       dsa_flush_workqueue();
+
        mv88e6xxx_reg_lock(chip);
 
        err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
@@ -3399,7 +3406,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
                        return err;
        }
 
-       bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
+       bus = mdiobus_alloc_size(sizeof(*mdio_bus));
        if (!bus)
                return -ENOMEM;
 
@@ -3424,14 +3431,14 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
        if (!external) {
                err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
                if (err)
-                       return err;
+                       goto out;
        }
 
        err = of_mdiobus_register(bus, np);
        if (err) {
                dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
                mv88e6xxx_g2_irq_mdio_free(chip, bus);
-               return err;
+               goto out;
        }
 
        if (external)
@@ -3440,21 +3447,26 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
                list_add(&mdio_bus->list, &chip->mdios);
 
        return 0;
+
+out:
+       mdiobus_free(bus);
+       return err;
 }
 
 static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
 
 {
-       struct mv88e6xxx_mdio_bus *mdio_bus;
+       struct mv88e6xxx_mdio_bus *mdio_bus, *p;
        struct mii_bus *bus;
 
-       list_for_each_entry(mdio_bus, &chip->mdios, list) {
+       list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
                bus = mdio_bus->bus;
 
                if (!mdio_bus->external)
                        mv88e6xxx_g2_irq_mdio_free(chip, bus);
 
                mdiobus_unregister(bus);
+               mdiobus_free(bus);
        }
 }
 
index bf8d382..33f0cea 100644 (file)
@@ -1061,7 +1061,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
                return PTR_ERR(hw);
        }
 
-       bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+       bus = mdiobus_alloc_size(sizeof(*mdio_priv));
        if (!bus)
                return -ENOMEM;
 
@@ -1081,6 +1081,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
        rc = mdiobus_register(bus);
        if (rc < 0) {
                dev_err(dev, "failed to register MDIO bus\n");
+               mdiobus_free(bus);
                return rc;
        }
 
@@ -1132,6 +1133,7 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
                lynx_pcs_destroy(phylink_pcs);
        }
        mdiobus_unregister(felix->imdio);
+       mdiobus_free(felix->imdio);
 }
 
 static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
index 8c1c9da..f2f1608 100644 (file)
@@ -1029,7 +1029,7 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
        }
 
        /* Needed in order to initialize the bus mutex lock */
-       rc = of_mdiobus_register(bus, NULL);
+       rc = devm_of_mdiobus_register(dev, bus, NULL);
        if (rc < 0) {
                dev_err(dev, "failed to register MDIO bus\n");
                return rc;
@@ -1083,7 +1083,8 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
                mdio_device_free(mdio_device);
                lynx_pcs_destroy(phylink_pcs);
        }
-       mdiobus_unregister(felix->imdio);
+
+       /* mdiobus_unregister and mdiobus_free handled by devres */
 }
 
 static const struct felix_info seville_info_vsc9953 = {
index da0d7e6..c39de2a 100644 (file)
@@ -378,7 +378,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
        if (!mnp)
                return -ENODEV;
 
-       ret = of_mdiobus_register(mbus, mnp);
+       ret = devm_of_mdiobus_register(dev, mbus, mnp);
        of_node_put(mnp);
        if (ret)
                return ret;
@@ -1091,7 +1091,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
        }
 
        irq_domain_remove(priv->irqdomain);
-       mdiobus_unregister(priv->mbus);
        dsa_unregister_switch(&priv->ds);
 
        reset_control_assert(priv->sw_reset);
index efdcf48..2af3da4 100644 (file)
@@ -425,6 +425,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
 
        pci_free_irq_vectors(pdata->pcidev);
 
+       /* Disable all interrupts in the hardware */
+       XP_IOWRITE(pdata, XP_INT_EN, 0x0);
+
        xgbe_free_pdata(pdata);
 }
 
index da59524..f50604f 100644 (file)
@@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
                atl1c_clean_buffer(pdev, buffer_info);
        }
 
-       netdev_reset_queue(adapter->netdev);
+       netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
 
        /* Zero out Tx-buffers */
        memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
index c6412c5..b4381cd 100644 (file)
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct bgmac *bgmac;
+       struct resource *regs;
        int ret;
 
        bgmac = bgmac_alloc(&pdev->dev);
@@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev)
        if (IS_ERR(bgmac->plat.base))
                return PTR_ERR(bgmac->plat.base);
 
-       bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
-       if (IS_ERR(bgmac->plat.idm_base))
-               return PTR_ERR(bgmac->plat.idm_base);
-       else
+       /* The idm_base resource is optional for some platforms */
+       regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+       if (regs) {
+               bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+               if (IS_ERR(bgmac->plat.idm_base))
+                       return PTR_ERR(bgmac->plat.idm_base);
                bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+       }
 
-       bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
-       if (IS_ERR(bgmac->plat.nicpm_base))
-               return PTR_ERR(bgmac->plat.nicpm_base);
+       /* The nicpm_base resource is optional for some platforms */
+       regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+       if (regs) {
+               bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+                                                              regs);
+               if (IS_ERR(bgmac->plat.nicpm_base))
+                       return PTR_ERR(bgmac->plat.nicpm_base);
+       }
 
        bgmac->read = platform_bgmac_read;
        bgmac->write = platform_bgmac_write;
index e20aafe..b97ed9b 100644 (file)
@@ -8216,7 +8216,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
                if (rc) {
                        dev_err(&pdev->dev,
-                               "pci_set_consistent_dma_mask failed, aborting\n");
+                               "dma_set_coherent_mask failed, aborting\n");
                        goto err_out_unmap;
                }
        } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
index 774c1f1..eedb48d 100644 (file)
@@ -100,6 +100,9 @@ MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FW_FILE_NAME_E1);
 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 MODULE_FIRMWARE(FW_FILE_NAME_E2);
+MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
 
 int bnx2x_num_queues;
 module_param_named(num_queues, bnx2x_num_queues, int, 0444);
index 4f94136..b1c98d1 100644 (file)
@@ -4747,8 +4747,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
                return rc;
 
        req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
-       req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
-       req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+       if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+               req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+               req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+       }
        req->mask = cpu_to_le32(vnic->rx_mask);
        return hwrm_req_send_silent(bp, req);
 }
@@ -7787,6 +7789,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
        return 0;
 }
 
+static void bnxt_remap_fw_health_regs(struct bnxt *bp)
+{
+       if (!bp->fw_health)
+               return;
+
+       if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
+               bp->fw_health->status_reliable = true;
+               bp->fw_health->resets_reliable = true;
+       } else {
+               bnxt_try_map_fw_health_reg(bp);
+       }
+}
+
 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
 {
        struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -8639,6 +8654,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
        vnic->uc_filter_count = 1;
 
        vnic->rx_mask = 0;
+       if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
+               goto skip_rx_mask;
+
        if (bp->dev->flags & IFF_BROADCAST)
                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 
@@ -8648,7 +8666,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
        if (bp->dev->flags & IFF_ALLMULTI) {
                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
                vnic->mc_list_count = 0;
-       } else {
+       } else if (bp->dev->flags & IFF_MULTICAST) {
                u32 mask = 0;
 
                bnxt_mc_list_updated(bp, &mask);
@@ -8659,6 +8677,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
        if (rc)
                goto err_out;
 
+skip_rx_mask:
        rc = bnxt_hwrm_set_coal(bp);
        if (rc)
                netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
@@ -9850,8 +9869,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
                resc_reinit = true;
        if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
                fw_reset = true;
-       else if (bp->fw_health && !bp->fw_health->status_reliable)
-               bnxt_try_map_fw_health_reg(bp);
+       else
+               bnxt_remap_fw_health_regs(bp);
 
        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
                netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
@@ -10330,13 +10349,15 @@ int bnxt_half_open_nic(struct bnxt *bp)
                goto half_open_err;
        }
 
-       rc = bnxt_alloc_mem(bp, false);
+       rc = bnxt_alloc_mem(bp, true);
        if (rc) {
                netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
                goto half_open_err;
        }
-       rc = bnxt_init_nic(bp, false);
+       set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+       rc = bnxt_init_nic(bp, true);
        if (rc) {
+               clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
                goto half_open_err;
        }
@@ -10344,7 +10365,7 @@ int bnxt_half_open_nic(struct bnxt *bp)
 
 half_open_err:
        bnxt_free_skbs(bp);
-       bnxt_free_mem(bp, false);
+       bnxt_free_mem(bp, true);
        dev_close(bp->dev);
        return rc;
 }
@@ -10354,9 +10375,10 @@ half_open_err:
  */
 void bnxt_half_close_nic(struct bnxt *bp)
 {
-       bnxt_hwrm_resource_free(bp, false, false);
+       bnxt_hwrm_resource_free(bp, false, true);
        bnxt_free_skbs(bp);
-       bnxt_free_mem(bp, false);
+       bnxt_free_mem(bp, true);
+       clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
 }
 
 void bnxt_reenable_sriov(struct bnxt *bp)
@@ -10772,7 +10794,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
        if (dev->flags & IFF_ALLMULTI) {
                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
                vnic->mc_list_count = 0;
-       } else {
+       } else if (dev->flags & IFF_MULTICAST) {
                mc_update = bnxt_mc_list_updated(bp, &mask);
        }
 
@@ -10849,9 +10871,10 @@ skip_uc:
            !bnxt_promisc_ok(bp))
                vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
-       if (rc && vnic->mc_list_count) {
+       if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
                netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
                            rc);
+               vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
                vnic->mc_list_count = 0;
                rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
index 440dfeb..666fc1e 100644 (file)
@@ -1921,6 +1921,7 @@ struct bnxt {
 #define BNXT_STATE_RECOVER             12
 #define BNXT_STATE_FW_NON_FATAL_COND   13
 #define BNXT_STATE_FW_ACTIVATE_RESET   14
+#define BNXT_STATE_HALF_OPEN           15      /* For offline ethtool tests */
 
 #define BNXT_NO_FW_ACCESS(bp)                                  \
        (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) ||    \
index 4da31b1..f6e21fa 100644 (file)
@@ -367,6 +367,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
        }
 }
 
+/* Live patch status in NVM */
+#define BNXT_LIVEPATCH_NOT_INSTALLED   0
+#define BNXT_LIVEPATCH_INSTALLED       FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
+#define BNXT_LIVEPATCH_REMOVED         FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
+#define BNXT_LIVEPATCH_MASK            (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
+                                        FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
+#define BNXT_LIVEPATCH_ACTIVATED       BNXT_LIVEPATCH_MASK
+
+#define BNXT_LIVEPATCH_STATE(flags)    ((flags) & BNXT_LIVEPATCH_MASK)
+
 static int
 bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
 {
@@ -374,8 +384,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
        struct hwrm_fw_livepatch_query_input *query_req;
        struct hwrm_fw_livepatch_output *patch_resp;
        struct hwrm_fw_livepatch_input *patch_req;
+       u16 flags, live_patch_state;
+       bool activated = false;
        u32 installed = 0;
-       u16 flags;
        u8 target;
        int rc;
 
@@ -394,7 +405,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
                hwrm_req_drop(bp, query_req);
                return rc;
        }
-       patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
        patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
        patch_resp = hwrm_req_hold(bp, patch_req);
 
@@ -407,12 +417,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
                }
 
                flags = le16_to_cpu(query_resp->status_flags);
-               if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
+               live_patch_state = BNXT_LIVEPATCH_STATE(flags);
+
+               if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
                        continue;
-               if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
-                   !strncmp(query_resp->active_ver, query_resp->install_ver,
-                            sizeof(query_resp->active_ver)))
+
+               if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
+                       activated = true;
                        continue;
+               }
+
+               if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
+                       patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+               else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
+                       patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;
 
                patch_req->fw_target = target;
                rc = hwrm_req_send(bp, patch_req);
@@ -424,8 +442,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
        }
 
        if (!rc && !installed) {
-               NL_SET_ERR_MSG_MOD(extack, "No live patches found");
-               rc = -ENOENT;
+               if (activated) {
+                       NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
+                       rc = -EEXIST;
+               } else {
+                       NL_SET_ERR_MSG_MOD(extack, "No live patches found");
+                       rc = -ENOENT;
+               }
        }
        hwrm_req_drop(bp, query_req);
        hwrm_req_drop(bp, patch_req);
index 003330e..8aaa233 100644 (file)
@@ -25,6 +25,7 @@
 #include "bnxt_hsi.h"
 #include "bnxt.h"
 #include "bnxt_hwrm.h"
+#include "bnxt_ulp.h"
 #include "bnxt_xdp.h"
 #include "bnxt_ptp.h"
 #include "bnxt_ethtool.h"
@@ -1969,6 +1970,9 @@ static int bnxt_get_fecparam(struct net_device *dev,
        case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
                fec->active_fec |= ETHTOOL_FEC_LLRS;
                break;
+       case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+               fec->active_fec |= ETHTOOL_FEC_OFF;
+               break;
        }
        return 0;
 }
@@ -3454,7 +3458,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
        if (!skb)
                return -ENOMEM;
        data = skb_put(skb, pkt_size);
-       eth_broadcast_addr(data);
+       ether_addr_copy(&data[i], bp->dev->dev_addr);
        i += ETH_ALEN;
        ether_addr_copy(&data[i], bp->dev->dev_addr);
        i += ETH_ALEN;
@@ -3548,9 +3552,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
        if (!offline) {
                bnxt_run_fw_tests(bp, test_mask, &test_results);
        } else {
-               rc = bnxt_close_nic(bp, false, false);
-               if (rc)
+               bnxt_ulp_stop(bp);
+               rc = bnxt_close_nic(bp, true, false);
+               if (rc) {
+                       bnxt_ulp_start(bp, rc);
                        return;
+               }
                bnxt_run_fw_tests(bp, test_mask, &test_results);
 
                buf[BNXT_MACLPBK_TEST_IDX] = 1;
@@ -3560,6 +3567,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
                if (rc) {
                        bnxt_hwrm_mac_loopback(bp, false);
                        etest->flags |= ETH_TEST_FL_FAILED;
+                       bnxt_ulp_start(bp, rc);
                        return;
                }
                if (bnxt_run_loopback(bp))
@@ -3585,7 +3593,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
                }
                bnxt_hwrm_phy_loopback(bp, false, false);
                bnxt_half_close_nic(bp);
-               rc = bnxt_open_nic(bp, false, true);
+               rc = bnxt_open_nic(bp, true, true);
+               bnxt_ulp_start(bp, rc);
        }
        if (rc || bnxt_test_irq(bp)) {
                buf[BNXT_IRQ_TEST_IDX] = 1;
index 566c948..b01d429 100644 (file)
@@ -644,17 +644,23 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
 
                /* Last byte of resp contains valid bit */
                valid = ((u8 *)ctx->resp) + len - 1;
-               for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
+               for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
                        /* make sure we read from updated DMA memory */
                        dma_rmb();
                        if (*valid)
                                break;
-                       usleep_range(1, 5);
+                       if (j < 10) {
+                               udelay(1);
+                               j++;
+                       } else {
+                               usleep_range(20, 30);
+                               j += 20;
+                       }
                }
 
                if (j >= HWRM_VALID_BIT_DELAY_USEC) {
                        hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
-                                hwrm_total_timeout(i), req_type,
+                                hwrm_total_timeout(i) + j, req_type,
                                 le16_to_cpu(ctx->req->seq_id), len, *valid);
                        goto exit;
                }
index d52bd2d..c98032e 100644 (file)
@@ -90,7 +90,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
 }
 
 
-#define HWRM_VALID_BIT_DELAY_USEC      150
+#define HWRM_VALID_BIT_DELAY_USEC      50000
 
 static inline bool bnxt_cfa_hwrm_message(u16 req_type)
 {
index a363da9..98498a7 100644 (file)
@@ -4712,7 +4712,7 @@ static int macb_probe(struct platform_device *pdev)
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
-               dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+               dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
                bp->hw_dma_cap |= HW_DMA_CAP_64B;
        }
 #endif
index da41eee..a06003b 100644 (file)
@@ -3613,6 +3613,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
            MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
        adapter->params.pci.vpd_cap_addr =
            pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+       if (!adapter->params.pci.vpd_cap_addr)
+               return -ENODEV;
        ret = get_vpd_params(adapter, &adapter->params.vpd);
        if (ret < 0)
                return ret;
index 691605c..d5356db 100644 (file)
@@ -989,117 +989,6 @@ static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
        return 0;
 }
 
-static void ftgmac100_adjust_link(struct net_device *netdev)
-{
-       struct ftgmac100 *priv = netdev_priv(netdev);
-       struct phy_device *phydev = netdev->phydev;
-       bool tx_pause, rx_pause;
-       int new_speed;
-
-       /* We store "no link" as speed 0 */
-       if (!phydev->link)
-               new_speed = 0;
-       else
-               new_speed = phydev->speed;
-
-       /* Grab pause settings from PHY if configured to do so */
-       if (priv->aneg_pause) {
-               rx_pause = tx_pause = phydev->pause;
-               if (phydev->asym_pause)
-                       tx_pause = !rx_pause;
-       } else {
-               rx_pause = priv->rx_pause;
-               tx_pause = priv->tx_pause;
-       }
-
-       /* Link hasn't changed, do nothing */
-       if (phydev->speed == priv->cur_speed &&
-           phydev->duplex == priv->cur_duplex &&
-           rx_pause == priv->rx_pause &&
-           tx_pause == priv->tx_pause)
-               return;
-
-       /* Print status if we have a link or we had one and just lost it,
-        * don't print otherwise.
-        */
-       if (new_speed || priv->cur_speed)
-               phy_print_status(phydev);
-
-       priv->cur_speed = new_speed;
-       priv->cur_duplex = phydev->duplex;
-       priv->rx_pause = rx_pause;
-       priv->tx_pause = tx_pause;
-
-       /* Link is down, do nothing else */
-       if (!new_speed)
-               return;
-
-       /* Disable all interrupts */
-       iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-
-       /* Reset the adapter asynchronously */
-       schedule_work(&priv->reset_task);
-}
-
-static int ftgmac100_mii_probe(struct net_device *netdev)
-{
-       struct ftgmac100 *priv = netdev_priv(netdev);
-       struct platform_device *pdev = to_platform_device(priv->dev);
-       struct device_node *np = pdev->dev.of_node;
-       struct phy_device *phydev;
-       phy_interface_t phy_intf;
-       int err;
-
-       /* Default to RGMII. It's a gigabit part after all */
-       err = of_get_phy_mode(np, &phy_intf);
-       if (err)
-               phy_intf = PHY_INTERFACE_MODE_RGMII;
-
-       /* Aspeed only supports these. I don't know about other IP
-        * block vendors so I'm going to just let them through for
-        * now. Note that this is only a warning if for some obscure
-        * reason the DT really means to lie about it or it's a newer
-        * part we don't know about.
-        *
-        * On the Aspeed SoC there are additionally straps and SCU
-        * control bits that could tell us what the interface is
-        * (or allow us to configure it while the IP block is held
-        * in reset). For now I chose to keep this driver away from
-        * those SoC specific bits and assume the device-tree is
-        * right and the SCU has been configured properly by pinmux
-        * or the firmware.
-        */
-       if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
-               netdev_warn(netdev,
-                           "Unsupported PHY mode %s !\n",
-                           phy_modes(phy_intf));
-       }
-
-       phydev = phy_find_first(priv->mii_bus);
-       if (!phydev) {
-               netdev_info(netdev, "%s: no PHY found\n", netdev->name);
-               return -ENODEV;
-       }
-
-       phydev = phy_connect(netdev, phydev_name(phydev),
-                            &ftgmac100_adjust_link, phy_intf);
-
-       if (IS_ERR(phydev)) {
-               netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
-               return PTR_ERR(phydev);
-       }
-
-       /* Indicate that we support PAUSE frames (see comment in
-        * Documentation/networking/phy.rst)
-        */
-       phy_support_asym_pause(phydev);
-
-       /* Display what we found */
-       phy_attached_info(phydev);
-
-       return 0;
-}
-
 static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
 {
        struct net_device *netdev = bus->priv;
@@ -1410,10 +1299,8 @@ static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
        return err;
 }
 
-static void ftgmac100_reset_task(struct work_struct *work)
+static void ftgmac100_reset(struct ftgmac100 *priv)
 {
-       struct ftgmac100 *priv = container_of(work, struct ftgmac100,
-                                             reset_task);
        struct net_device *netdev = priv->netdev;
        int err;
 
@@ -1459,6 +1346,134 @@ static void ftgmac100_reset_task(struct work_struct *work)
        rtnl_unlock();
 }
 
+static void ftgmac100_reset_task(struct work_struct *work)
+{
+       struct ftgmac100 *priv = container_of(work, struct ftgmac100,
+                                             reset_task);
+
+       ftgmac100_reset(priv);
+}
+
+static void ftgmac100_adjust_link(struct net_device *netdev)
+{
+       struct ftgmac100 *priv = netdev_priv(netdev);
+       struct phy_device *phydev = netdev->phydev;
+       bool tx_pause, rx_pause;
+       int new_speed;
+
+       /* We store "no link" as speed 0 */
+       if (!phydev->link)
+               new_speed = 0;
+       else
+               new_speed = phydev->speed;
+
+       /* Grab pause settings from PHY if configured to do so */
+       if (priv->aneg_pause) {
+               rx_pause = tx_pause = phydev->pause;
+               if (phydev->asym_pause)
+                       tx_pause = !rx_pause;
+       } else {
+               rx_pause = priv->rx_pause;
+               tx_pause = priv->tx_pause;
+       }
+
+       /* Link hasn't changed, do nothing */
+       if (phydev->speed == priv->cur_speed &&
+           phydev->duplex == priv->cur_duplex &&
+           rx_pause == priv->rx_pause &&
+           tx_pause == priv->tx_pause)
+               return;
+
+       /* Print status if we have a link or we had one and just lost it,
+        * don't print otherwise.
+        */
+       if (new_speed || priv->cur_speed)
+               phy_print_status(phydev);
+
+       priv->cur_speed = new_speed;
+       priv->cur_duplex = phydev->duplex;
+       priv->rx_pause = rx_pause;
+       priv->tx_pause = tx_pause;
+
+       /* Link is down, do nothing else */
+       if (!new_speed)
+               return;
+
+       /* Disable all interrupts */
+       iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+       /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
+        * order consistent to prevent deadlock.
+        */
+       if (netdev->phydev)
+               mutex_unlock(&netdev->phydev->lock);
+
+       ftgmac100_reset(priv);
+
+       if (netdev->phydev)
+               mutex_lock(&netdev->phydev->lock);
+
+}
+
+static int ftgmac100_mii_probe(struct net_device *netdev)
+{
+       struct ftgmac100 *priv = netdev_priv(netdev);
+       struct platform_device *pdev = to_platform_device(priv->dev);
+       struct device_node *np = pdev->dev.of_node;
+       struct phy_device *phydev;
+       phy_interface_t phy_intf;
+       int err;
+
+       /* Default to RGMII. It's a gigabit part after all */
+       err = of_get_phy_mode(np, &phy_intf);
+       if (err)
+               phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+       /* Aspeed only supports these. I don't know about other IP
+        * block vendors so I'm going to just let them through for
+        * now. Note that this is only a warning if for some obscure
+        * reason the DT really means to lie about it or it's a newer
+        * part we don't know about.
+        *
+        * On the Aspeed SoC there are additionally straps and SCU
+        * control bits that could tell us what the interface is
+        * (or allow us to configure it while the IP block is held
+        * in reset). For now I chose to keep this driver away from
+        * those SoC specific bits and assume the device-tree is
+        * right and the SCU has been configured properly by pinmux
+        * or the firmware.
+        */
+       if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
+               netdev_warn(netdev,
+                           "Unsupported PHY mode %s !\n",
+                           phy_modes(phy_intf));
+       }
+
+       phydev = phy_find_first(priv->mii_bus);
+       if (!phydev) {
+               netdev_info(netdev, "%s: no PHY found\n", netdev->name);
+               return -ENODEV;
+       }
+
+       phydev = phy_connect(netdev, phydev_name(phydev),
+                            &ftgmac100_adjust_link, phy_intf);
+
+       if (IS_ERR(phydev)) {
+               netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
+               return PTR_ERR(phydev);
+       }
+
+       /* Indicate that we support PAUSE frames (see comment in
+        * Documentation/networking/phy.rst)
+        */
+       phy_support_asym_pause(phydev);
+
+       /* Display what we found */
+       phy_attached_info(phydev);
+
+       return 0;
+}
+
 static int ftgmac100_open(struct net_device *netdev)
 {
        struct ftgmac100 *priv = netdev_priv(netdev);
index e985ae0..0f90d2d 100644 (file)
@@ -4338,7 +4338,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
        }
 
        INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+       mutex_init(&priv->onestep_tstamp_lock);
        skb_queue_head_init(&priv->tx_skbs);
 
        priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
@@ -4523,12 +4523,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
 #ifdef CONFIG_DEBUG_FS
        dpaa2_dbg_remove(priv);
 #endif
+
+       unregister_netdev(net_dev);
        rtnl_lock();
        dpaa2_eth_disconnect_mac(priv);
        rtnl_unlock();
 
-       unregister_netdev(net_dev);
-
        dpaa2_eth_dl_port_del(priv);
        dpaa2_eth_dl_traps_unregister(priv);
        dpaa2_eth_dl_free(priv);
index d6eefbb..cacd454 100644 (file)
@@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        struct netlink_ext_ack *extack = cls->common.extack;
+       int ret = -EOPNOTSUPP;
 
        if (dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
                }
 
                *vlan = (u16)match.key->vlan_id;
+               ret = 0;
        }
 
-       return 0;
+       return ret;
 }
 
 static int
index 2068199..e4e98aa 100644 (file)
@@ -609,6 +609,7 @@ static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
 
        *packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
        *work_done = work_cnt;
+       skb_record_rx_queue(skb, rx->q_num);
        if (skb_is_nonlinear(skb))
                napi_gro_frags(napi);
        else
index bda7a2a..b423e94 100644 (file)
@@ -110,6 +110,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
                                         struct ibmvnic_sub_crq_queue *tx_scrq);
 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb);
+static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
 
 struct ibmvnic_stat {
        char name[ETH_GSTRING_LEN];
@@ -1424,7 +1425,7 @@ static int __ibmvnic_open(struct net_device *netdev)
        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
        if (rc) {
                ibmvnic_napi_disable(adapter);
-               release_resources(adapter);
+               ibmvnic_disable_irqs(adapter);
                return rc;
        }
 
@@ -1474,9 +1475,6 @@ static int ibmvnic_open(struct net_device *netdev)
                rc = init_resources(adapter);
                if (rc) {
                        netdev_err(netdev, "failed to initialize resources\n");
-                       release_resources(adapter);
-                       release_rx_pools(adapter);
-                       release_tx_pools(adapter);
                        goto out;
                }
        }
@@ -1493,6 +1491,13 @@ out:
                adapter->state = VNIC_OPEN;
                rc = 0;
        }
+
+       if (rc) {
+               release_resources(adapter);
+               release_rx_pools(adapter);
+               release_tx_pools(adapter);
+       }
+
        return rc;
 }
 
@@ -2207,6 +2212,19 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
        return "UNKNOWN";
 }
 
+/*
+ * Initialize the init_done completion and return code values. We
+ * can get a transport event just after registering the CRQ and the
+ * tasklet will use this to communicate the transport event. To ensure
+ * we don't miss the notification/error, initialize these _before_
+ * registering the CRQ.
+ */
+static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
+{
+       reinit_completion(&adapter->init_done);
+       adapter->init_done_rc = 0;
+}
+
 /*
  * do_reset returns zero if we are able to keep processing reset events, or
  * non-zero if we hit a fatal error and must halt.
@@ -2313,6 +2331,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                 */
                adapter->state = VNIC_PROBED;
 
+               reinit_init_done(adapter);
+
                if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
                        rc = init_crq_queue(adapter);
                } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
@@ -2456,7 +2476,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
         */
        adapter->state = VNIC_PROBED;
 
-       reinit_completion(&adapter->init_done);
+       reinit_init_done(adapter);
+
        rc = init_crq_queue(adapter);
        if (rc) {
                netdev_err(adapter->netdev,
@@ -2597,23 +2618,82 @@ out:
 static void __ibmvnic_reset(struct work_struct *work)
 {
        struct ibmvnic_adapter *adapter;
-       bool saved_state = false;
+       unsigned int timeout = 5000;
        struct ibmvnic_rwi *tmprwi;
+       bool saved_state = false;
        struct ibmvnic_rwi *rwi;
        unsigned long flags;
-       u32 reset_state;
+       struct device *dev;
+       bool need_reset;
        int num_fails = 0;
+       u32 reset_state;
        int rc = 0;
 
        adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
+               dev = &adapter->vdev->dev;
 
-       if (test_and_set_bit_lock(0, &adapter->resetting)) {
+       /* Wait for ibmvnic_probe() to complete. If probe is taking too long
+        * or if another reset is in progress, defer work for now. If probe
+        * eventually fails it will flush and terminate our work.
+        *
+        * Three possibilities here:
+        * 1. Adapter being removed - just return
+        * 2. Timed out on probe or another reset in progress - delay the work
+        * 3. Completed probe - perform any resets in queue
+        */
+       if (adapter->state == VNIC_PROBING &&
+           !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
+               dev_err(dev, "Reset thread timed out on probe");
                queue_delayed_work(system_long_wq,
                                   &adapter->ibmvnic_delayed_reset,
                                   IBMVNIC_RESET_DELAY);
                return;
        }
 
+       /* adapter is done with probe (i.e state is never VNIC_PROBING now) */
+       if (adapter->state == VNIC_REMOVING)
+               return;
+
+       /* ->rwi_list is stable now (no one else is removing entries) */
+
+       /* ibmvnic_probe() may have purged the reset queue after we were
+        * scheduled to process a reset so there maybe no resets to process.
+        * Before setting the ->resetting bit though, we have to make sure
+        * that there is infact a reset to process. Otherwise we may race
+        * with ibmvnic_open() and end up leaving the vnic down:
+        *
+        *      __ibmvnic_reset()           ibmvnic_open()
+        *      -----------------           --------------
+        *
+        *  set ->resetting bit
+        *                              find ->resetting bit is set
+        *                              set ->state to IBMVNIC_OPEN (i.e
+        *                              assume reset will open device)
+        *                              return
+        *  find reset queue empty
+        *  return
+        *
+        *      Neither performed vnic login/open and vnic stays down
+        *
+        * If we hold the lock and conditionally set the bit, either we
+        * or ibmvnic_open() will complete the open.
+        */
+       need_reset = false;
+       spin_lock(&adapter->rwi_lock);
+       if (!list_empty(&adapter->rwi_list)) {
+               if (test_and_set_bit_lock(0, &adapter->resetting)) {
+                       queue_delayed_work(system_long_wq,
+                                          &adapter->ibmvnic_delayed_reset,
+                                          IBMVNIC_RESET_DELAY);
+               } else {
+                       need_reset = true;
+               }
+       }
+       spin_unlock(&adapter->rwi_lock);
+
+       if (!need_reset)
+               return;
+
        rwi = get_next_rwi(adapter);
        while (rwi) {
                spin_lock_irqsave(&adapter->state_lock, flags);
@@ -2730,12 +2810,23 @@ static void __ibmvnic_delayed_reset(struct work_struct *work)
        __ibmvnic_reset(&adapter->ibmvnic_reset);
 }
 
+static void flush_reset_queue(struct ibmvnic_adapter *adapter)
+{
+       struct list_head *entry, *tmp_entry;
+
+       if (!list_empty(&adapter->rwi_list)) {
+               list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
+                       list_del(entry);
+                       kfree(list_entry(entry, struct ibmvnic_rwi, list));
+               }
+       }
+}
+
 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
                         enum ibmvnic_reset_reason reason)
 {
-       struct list_head *entry, *tmp_entry;
-       struct ibmvnic_rwi *rwi, *tmp;
        struct net_device *netdev = adapter->netdev;
+       struct ibmvnic_rwi *rwi, *tmp;
        unsigned long flags;
        int ret;
 
@@ -2754,13 +2845,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
                goto err;
        }
 
-       if (adapter->state == VNIC_PROBING) {
-               netdev_warn(netdev, "Adapter reset during probe\n");
-               adapter->init_done_rc = -EAGAIN;
-               ret = EAGAIN;
-               goto err;
-       }
-
        list_for_each_entry(tmp, &adapter->rwi_list, list) {
                if (tmp->reset_reason == reason) {
                        netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
@@ -2778,10 +2862,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
        /* if we just received a transport event,
         * flush reset queue and process this reset
         */
-       if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
-               list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
-                       list_del(entry);
-       }
+       if (adapter->force_reset_recovery)
+               flush_reset_queue(adapter);
+
        rwi->reset_reason = reason;
        list_add_tail(&rwi->list, &adapter->rwi_list);
        netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
@@ -5316,9 +5399,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                        }
 
                        if (!completion_done(&adapter->init_done)) {
-                               complete(&adapter->init_done);
                                if (!adapter->init_done_rc)
                                        adapter->init_done_rc = -EAGAIN;
+                               complete(&adapter->init_done);
                        }
 
                        break;
@@ -5341,6 +5424,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                        adapter->fw_done_rc = -EIO;
                        complete(&adapter->fw_done);
                }
+
+               /* if we got here during crq-init, retry crq-init */
+               if (!completion_done(&adapter->init_done)) {
+                       adapter->init_done_rc = -EAGAIN;
+                       complete(&adapter->init_done);
+               }
+
                if (!completion_done(&adapter->stats_done))
                        complete(&adapter->stats_done);
                if (test_bit(0, &adapter->resetting))
@@ -5657,10 +5747,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
 
        adapter->from_passive_init = false;
 
-       if (reset)
-               reinit_completion(&adapter->init_done);
-
-       adapter->init_done_rc = 0;
        rc = ibmvnic_send_crq_init(adapter);
        if (rc) {
                dev_err(dev, "Send crq init failed with error %d\n", rc);
@@ -5674,12 +5760,14 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
 
        if (adapter->init_done_rc) {
                release_crq_queue(adapter);
+               dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
                return adapter->init_done_rc;
        }
 
        if (adapter->from_passive_init) {
                adapter->state = VNIC_OPEN;
                adapter->from_passive_init = false;
+               dev_err(dev, "CRQ-init failed, passive-init\n");
                return -EINVAL;
        }
 
@@ -5719,6 +5807,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        struct ibmvnic_adapter *adapter;
        struct net_device *netdev;
        unsigned char *mac_addr_p;
+       unsigned long flags;
        bool init_success;
        int rc;
 
@@ -5763,6 +5852,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        spin_lock_init(&adapter->rwi_lock);
        spin_lock_init(&adapter->state_lock);
        mutex_init(&adapter->fw_lock);
+       init_completion(&adapter->probe_done);
        init_completion(&adapter->init_done);
        init_completion(&adapter->fw_done);
        init_completion(&adapter->reset_done);
@@ -5773,6 +5863,33 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
        init_success = false;
        do {
+               reinit_init_done(adapter);
+
+               /* clear any failovers we got in the previous pass
+                * since we are reinitializing the CRQ
+                */
+               adapter->failover_pending = false;
+
+               /* If we had already initialized CRQ, we may have one or
+                * more resets queued already. Discard those and release
+                * the CRQ before initializing the CRQ again.
+                */
+               release_crq_queue(adapter);
+
+               /* Since we are still in PROBING state, __ibmvnic_reset()
+                * will not access the ->rwi_list and since we released CRQ,
+                * we won't get _new_ transport events. But there may be an
+                * ongoing ibmvnic_reset() call. So serialize access to
+                * rwi_list. If we win the race, ibmvnic_reset() could add
+                * a reset after we purged but that's ok - we just may end
+                * up with an extra reset (i.e similar to having two or more
+                * resets in the queue at once).
+                * CHECK.
+                */
+               spin_lock_irqsave(&adapter->rwi_lock, flags);
+               flush_reset_queue(adapter);
+               spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
                rc = init_crq_queue(adapter);
                if (rc) {
                        dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
@@ -5804,12 +5921,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
                goto ibmvnic_dev_file_err;
 
        netif_carrier_off(netdev);
-       rc = register_netdev(netdev);
-       if (rc) {
-               dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
-               goto ibmvnic_register_fail;
-       }
-       dev_info(&dev->dev, "ibmvnic registered\n");
 
        if (init_success) {
                adapter->state = VNIC_PROBED;
@@ -5822,6 +5933,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
        adapter->wait_for_reset = false;
        adapter->last_reset_time = jiffies;
+
+       rc = register_netdev(netdev);
+       if (rc) {
+               dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
+               goto ibmvnic_register_fail;
+       }
+       dev_info(&dev->dev, "ibmvnic registered\n");
+
+       complete(&adapter->probe_done);
+
        return 0;
 
 ibmvnic_register_fail:
@@ -5836,6 +5957,17 @@ ibmvnic_stats_fail:
 ibmvnic_init_fail:
        release_sub_crqs(adapter, 1);
        release_crq_queue(adapter);
+
+       /* cleanup worker thread after releasing CRQ so we don't get
+        * transport events (i.e new work items for the worker thread).
+        */
+       adapter->state = VNIC_REMOVING;
+       complete(&adapter->probe_done);
+       flush_work(&adapter->ibmvnic_reset);
+       flush_delayed_work(&adapter->ibmvnic_delayed_reset);
+
+       flush_reset_queue(adapter);
+
        mutex_destroy(&adapter->fw_lock);
        free_netdev(netdev);
 
@@ -5912,10 +6044,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
                   be64_to_cpu(session_token));
        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
                                H_SESSION_ERR_DETECTED, session_token, 0, 0);
-       if (rc)
+       if (rc) {
                netdev_err(netdev,
                           "H_VIOCTL initiated failover failed, rc %ld\n",
                           rc);
+               goto last_resort;
+       }
+
+       return count;
 
 last_resort:
        netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
index 4a7a56f..fa2d607 100644 (file)
@@ -930,6 +930,7 @@ struct ibmvnic_adapter {
 
        struct ibmvnic_tx_pool *tx_pool;
        struct ibmvnic_tx_pool *tso_pool;
+       struct completion probe_done;
        struct completion init_done;
        int init_done_rc;
 
index bcf680e..13382df 100644 (file)
@@ -630,6 +630,7 @@ struct e1000_phy_info {
        bool disable_polarity_correction;
        bool is_mdix;
        bool polarity_correction;
+       bool reset_disable;
        bool speed_downgraded;
        bool autoneg_wait_to_complete;
 };
index c908c84..d60e201 100644 (file)
@@ -2050,6 +2050,10 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
        bool blocked = false;
        int i = 0;
 
+       /* Check the PHY (LCD) reset flag */
+       if (hw->phy.reset_disable)
+               return true;
+
        while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
               (i++ < 30))
                usleep_range(10000, 11000);
@@ -4136,9 +4140,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
                return ret_val;
 
        if (!(data & valid_csum_mask)) {
-               e_dbg("NVM Checksum Invalid\n");
+               e_dbg("NVM Checksum valid bit not set\n");
 
-               if (hw->mac.type < e1000_pch_cnp) {
+               if (hw->mac.type < e1000_pch_tgp) {
                        data |= valid_csum_mask;
                        ret_val = e1000_write_nvm(hw, word, 1, &data);
                        if (ret_val)
index 2504b11..638a3dd 100644 (file)
 #define I217_CGFREG_ENABLE_MTA_RESET   0x0002
 #define I217_MEMPWR                    PHY_REG(772, 26)
 #define I217_MEMPWR_DISABLE_SMB_RELEASE        0x0010
+#define I217_MEMPWR_MOEM               0x1000
 
 /* Receive Address Initial CRC Calculation */
 #define E1000_PCH_RAICC(_n)    (0x05F50 + ((_n) * 4))
index a42aeb5..c5bdef3 100644 (file)
@@ -6987,8 +6987,21 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = to_pci_dev(dev);
+       struct e1000_hw *hw = &adapter->hw;
+       u16 phy_data;
        int rc;
 
+       if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+           hw->mac.type >= e1000_pch_adp) {
+               /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */
+               e1e_rphy(hw, I217_MEMPWR, &phy_data);
+               phy_data |= I217_MEMPWR_MOEM;
+               e1e_wphy(hw, I217_MEMPWR, phy_data);
+
+               /* Disable LCD reset */
+               hw->phy.reset_disable = true;
+       }
+
        e1000e_flush_lpic(pdev);
 
        e1000e_pm_freeze(dev);
@@ -7010,6 +7023,8 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = to_pci_dev(dev);
+       struct e1000_hw *hw = &adapter->hw;
+       u16 phy_data;
        int rc;
 
        /* Introduce S0ix implementation */
@@ -7020,6 +7035,17 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
        if (rc)
                return rc;
 
+       if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+           hw->mac.type >= e1000_pch_adp) {
+               /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */
+               e1e_rphy(hw, I217_MEMPWR, &phy_data);
+               phy_data &= ~I217_MEMPWR_MOEM;
+               e1e_wphy(hw, I217_MEMPWR, phy_data);
+
+               /* Enable LCD reset */
+               hw->phy.reset_disable = false;
+       }
+
        return e1000e_pm_thaw(dev);
 }
 
index 0c4b7df..31b03fe 100644 (file)
@@ -5372,15 +5372,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
        /* There is no need to reset BW when mqprio mode is on.  */
        if (pf->flags & I40E_FLAG_TC_MQPRIO)
                return 0;
-
-       if (!vsi->mqprio_qopt.qopt.hw) {
-               if (pf->flags & I40E_FLAG_DCB_ENABLED)
-                       goto skip_reset;
-
-               if (IS_ENABLED(CONFIG_I40E_DCB) &&
-                   i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
-                       goto skip_reset;
-
+       if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
                ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
                if (ret)
                        dev_info(&pf->pdev->dev,
@@ -5388,8 +5380,6 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                                 vsi->seid);
                return ret;
        }
-
-skip_reset:
        memset(&bw_data, 0, sizeof(bw_data));
        bw_data.tc_valid_bits = enabled_tc;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
index 59806d1..8942394 100644 (file)
@@ -201,6 +201,10 @@ enum iavf_state_t {
        __IAVF_RUNNING,         /* opened, working */
 };
 
+enum iavf_critical_section_t {
+       __IAVF_IN_REMOVE_TASK,  /* device being removed */
+};
+
 #define IAVF_CLOUD_FIELD_OMAC          0x01
 #define IAVF_CLOUD_FIELD_IMAC          0x02
 #define IAVF_CLOUD_FIELD_IVLAN 0x04
@@ -246,7 +250,6 @@ struct iavf_adapter {
        struct list_head mac_filter_list;
        struct mutex crit_lock;
        struct mutex client_lock;
-       struct mutex remove_lock;
        /* Lock to protect accesses to MAC and VLAN lists */
        spinlock_t mac_vlan_list_lock;
        char misc_vector_name[IFNAMSIZ + 9];
@@ -284,6 +287,7 @@ struct iavf_adapter {
 #define IAVF_FLAG_LEGACY_RX                    BIT(15)
 #define IAVF_FLAG_REINIT_ITR_NEEDED            BIT(16)
 #define IAVF_FLAG_QUEUES_DISABLED              BIT(17)
+#define IAVF_FLAG_SETUP_NETDEV_FEATURES                BIT(18)
 /* duplicates for common code */
 #define IAVF_FLAG_DCB_ENABLED                  0
        /* flags for admin queue service task */
index 8125b91..dcf2426 100644 (file)
@@ -302,8 +302,9 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
        rd32(hw, IAVF_VFINT_ICR01);
        rd32(hw, IAVF_VFINT_ICR0_ENA1);
 
-       /* schedule work on the private workqueue */
-       queue_work(iavf_wq, &adapter->adminq_task);
+       if (adapter->state != __IAVF_REMOVE)
+               /* schedule work on the private workqueue */
+               queue_work(iavf_wq, &adapter->adminq_task);
 
        return IRQ_HANDLED;
 }
@@ -1136,8 +1137,7 @@ void iavf_down(struct iavf_adapter *adapter)
                rss->state = IAVF_ADV_RSS_DEL_REQUEST;
        spin_unlock_bh(&adapter->adv_rss_lock);
 
-       if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
-           adapter->state != __IAVF_RESETTING) {
+       if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
                /* cancel any current operation */
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                /* Schedule operations to close down the HW. Don't wait
@@ -2374,17 +2374,22 @@ static void iavf_watchdog_task(struct work_struct *work)
        struct iavf_hw *hw = &adapter->hw;
        u32 reg_val;
 
-       if (!mutex_trylock(&adapter->crit_lock))
+       if (!mutex_trylock(&adapter->crit_lock)) {
+               if (adapter->state == __IAVF_REMOVE)
+                       return;
+
                goto restart_watchdog;
+       }
 
        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
                iavf_change_state(adapter, __IAVF_COMM_FAILED);
 
-       if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
-           adapter->state != __IAVF_RESETTING) {
-               iavf_change_state(adapter, __IAVF_RESETTING);
+       if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+               mutex_unlock(&adapter->crit_lock);
+               queue_work(iavf_wq, &adapter->reset_task);
+               return;
        }
 
        switch (adapter->state) {
@@ -2419,6 +2424,15 @@ static void iavf_watchdog_task(struct work_struct *work)
                                   msecs_to_jiffies(1));
                return;
        case __IAVF_INIT_FAILED:
+               if (test_bit(__IAVF_IN_REMOVE_TASK,
+                            &adapter->crit_section)) {
+                       /* Do not update the state and do not reschedule
+                        * watchdog task, iavf_remove should handle this state
+                        * as it can loop forever
+                        */
+                       mutex_unlock(&adapter->crit_lock);
+                       return;
+               }
                if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
                        dev_err(&adapter->pdev->dev,
                                "Failed to communicate with PF; waiting before retry\n");
@@ -2435,6 +2449,17 @@ static void iavf_watchdog_task(struct work_struct *work)
                queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
                return;
        case __IAVF_COMM_FAILED:
+               if (test_bit(__IAVF_IN_REMOVE_TASK,
+                            &adapter->crit_section)) {
+                       /* Set state to __IAVF_INIT_FAILED and perform remove
+                        * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
+                        * doesn't bring the state back to __IAVF_COMM_FAILED.
+                        */
+                       iavf_change_state(adapter, __IAVF_INIT_FAILED);
+                       adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
+                       mutex_unlock(&adapter->crit_lock);
+                       return;
+               }
                reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
                          IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
                if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
@@ -2507,7 +2532,8 @@ static void iavf_watchdog_task(struct work_struct *work)
        schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
        mutex_unlock(&adapter->crit_lock);
 restart_watchdog:
-       queue_work(iavf_wq, &adapter->adminq_task);
+       if (adapter->state >= __IAVF_DOWN)
+               queue_work(iavf_wq, &adapter->adminq_task);
        if (adapter->aq_required)
                queue_delayed_work(iavf_wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(20));
@@ -2601,13 +2627,13 @@ static void iavf_reset_task(struct work_struct *work)
        /* When device is being removed it doesn't make sense to run the reset
         * task, just return in such a case.
         */
-       if (mutex_is_locked(&adapter->remove_lock))
-               return;
+       if (!mutex_trylock(&adapter->crit_lock)) {
+               if (adapter->state != __IAVF_REMOVE)
+                       queue_work(iavf_wq, &adapter->reset_task);
 
-       if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
-               schedule_work(&adapter->reset_task);
                return;
        }
+
        while (!mutex_trylock(&adapter->client_lock))
                usleep_range(500, 1000);
        if (CLIENT_ENABLED(adapter)) {
@@ -2662,6 +2688,7 @@ static void iavf_reset_task(struct work_struct *work)
                        reg_val);
                iavf_disable_vf(adapter);
                mutex_unlock(&adapter->client_lock);
+               mutex_unlock(&adapter->crit_lock);
                return; /* Do not attempt to reinit. It's dead, Jim. */
        }
 
@@ -2670,8 +2697,7 @@ continue_reset:
         * ndo_open() returning, so we can't assume it means all our open
         * tasks have finished, since we're not holding the rtnl_lock here.
         */
-       running = ((adapter->state == __IAVF_RUNNING) ||
-                  (adapter->state == __IAVF_RESETTING));
+       running = adapter->state == __IAVF_RUNNING;
 
        if (running) {
                netdev->flags &= ~IFF_UP;
@@ -2826,13 +2852,19 @@ static void iavf_adminq_task(struct work_struct *work)
        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
                goto out;
 
+       if (!mutex_trylock(&adapter->crit_lock)) {
+               if (adapter->state == __IAVF_REMOVE)
+                       return;
+
+               queue_work(iavf_wq, &adapter->adminq_task);
+               goto out;
+       }
+
        event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
        event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
        if (!event.msg_buf)
                goto out;
 
-       if (iavf_lock_timeout(&adapter->crit_lock, 200))
-               goto freedom;
        do {
                ret = iavf_clean_arq_element(hw, &event, &pending);
                v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
@@ -2848,6 +2880,24 @@ static void iavf_adminq_task(struct work_struct *work)
        } while (pending);
        mutex_unlock(&adapter->crit_lock);
 
+       if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
+               if (adapter->netdev_registered ||
+                   !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
+                       struct net_device *netdev = adapter->netdev;
+
+                       rtnl_lock();
+                       netdev_update_features(netdev);
+                       rtnl_unlock();
+                       /* Request VLAN offload settings */
+                       if (VLAN_V2_ALLOWED(adapter))
+                               iavf_set_vlan_offload_features
+                                       (adapter, 0, netdev->features);
+
+                       iavf_set_queue_vlan_tag_loc(adapter);
+               }
+
+               adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+       }
        if ((adapter->flags &
             (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
            adapter->state == __IAVF_RESETTING)
@@ -3800,11 +3850,12 @@ static int iavf_close(struct net_device *netdev)
        struct iavf_adapter *adapter = netdev_priv(netdev);
        int status;
 
-       if (adapter->state <= __IAVF_DOWN_PENDING)
-               return 0;
+       mutex_lock(&adapter->crit_lock);
 
-       while (!mutex_trylock(&adapter->crit_lock))
-               usleep_range(500, 1000);
+       if (adapter->state <= __IAVF_DOWN_PENDING) {
+               mutex_unlock(&adapter->crit_lock);
+               return 0;
+       }
 
        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
        if (CLIENT_ENABLED(adapter))
@@ -3853,8 +3904,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
                iavf_notify_client_l2_params(&adapter->vsi);
                adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
        }
-       adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-       queue_work(iavf_wq, &adapter->reset_task);
+
+       if (netif_running(netdev)) {
+               adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+               queue_work(iavf_wq, &adapter->reset_task);
+       }
 
        return 0;
 }
@@ -4431,7 +4485,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        mutex_init(&adapter->crit_lock);
        mutex_init(&adapter->client_lock);
-       mutex_init(&adapter->remove_lock);
        mutex_init(&hw->aq.asq_mutex);
        mutex_init(&hw->aq.arq_mutex);
 
@@ -4547,7 +4600,6 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
 static void iavf_remove(struct pci_dev *pdev)
 {
        struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
-       enum iavf_state_t prev_state = adapter->last_state;
        struct net_device *netdev = adapter->netdev;
        struct iavf_fdir_fltr *fdir, *fdirtmp;
        struct iavf_vlan_filter *vlf, *vlftmp;
@@ -4556,14 +4608,30 @@ static void iavf_remove(struct pci_dev *pdev)
        struct iavf_cloud_filter *cf, *cftmp;
        struct iavf_hw *hw = &adapter->hw;
        int err;
-       /* Indicate we are in remove and not to run reset_task */
-       mutex_lock(&adapter->remove_lock);
-       cancel_work_sync(&adapter->reset_task);
+
+       set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
+       /* Wait until port initialization is complete.
+        * There are flows where register/unregister netdev may race.
+        */
+       while (1) {
+               mutex_lock(&adapter->crit_lock);
+               if (adapter->state == __IAVF_RUNNING ||
+                   adapter->state == __IAVF_DOWN ||
+                   adapter->state == __IAVF_INIT_FAILED) {
+                       mutex_unlock(&adapter->crit_lock);
+                       break;
+               }
+
+               mutex_unlock(&adapter->crit_lock);
+               usleep_range(500, 1000);
+       }
        cancel_delayed_work_sync(&adapter->watchdog_task);
-       cancel_delayed_work_sync(&adapter->client_task);
+
        if (adapter->netdev_registered) {
-               unregister_netdev(netdev);
+               rtnl_lock();
+               unregister_netdevice(netdev);
                adapter->netdev_registered = false;
+               rtnl_unlock();
        }
        if (CLIENT_ALLOWED(adapter)) {
                err = iavf_lan_del_device(adapter);
@@ -4572,6 +4640,10 @@ static void iavf_remove(struct pci_dev *pdev)
                                 err);
        }
 
+       mutex_lock(&adapter->crit_lock);
+       dev_info(&adapter->pdev->dev, "Remove device\n");
+       iavf_change_state(adapter, __IAVF_REMOVE);
+
        iavf_request_reset(adapter);
        msleep(50);
        /* If the FW isn't responding, kick it once, but only once. */
@@ -4579,37 +4651,24 @@ static void iavf_remove(struct pci_dev *pdev)
                iavf_request_reset(adapter);
                msleep(50);
        }
-       if (iavf_lock_timeout(&adapter->crit_lock, 5000))
-               dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
 
-       dev_info(&adapter->pdev->dev, "Removing device\n");
+       iavf_misc_irq_disable(adapter);
        /* Shut down all the garbage mashers on the detention level */
-       iavf_change_state(adapter, __IAVF_REMOVE);
+       cancel_work_sync(&adapter->reset_task);
+       cancel_delayed_work_sync(&adapter->watchdog_task);
+       cancel_work_sync(&adapter->adminq_task);
+       cancel_delayed_work_sync(&adapter->client_task);
+
        adapter->aq_required = 0;
        adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
 
        iavf_free_all_tx_resources(adapter);
        iavf_free_all_rx_resources(adapter);
-       iavf_misc_irq_disable(adapter);
        iavf_free_misc_irq(adapter);
 
-       /* In case we enter iavf_remove from erroneous state, free traffic irqs
-        * here, so as to not cause a kernel crash, when calling
-        * iavf_reset_interrupt_capability.
-        */
-       if ((adapter->last_state == __IAVF_RESETTING &&
-            prev_state != __IAVF_DOWN) ||
-           (adapter->last_state == __IAVF_RUNNING &&
-            !(netdev->flags & IFF_UP)))
-               iavf_free_traffic_irqs(adapter);
-
        iavf_reset_interrupt_capability(adapter);
        iavf_free_q_vectors(adapter);
 
-       cancel_delayed_work_sync(&adapter->watchdog_task);
-
-       cancel_work_sync(&adapter->adminq_task);
-
        iavf_free_rss(adapter);
 
        if (hw->aq.asq.count)
@@ -4621,8 +4680,6 @@ static void iavf_remove(struct pci_dev *pdev)
        mutex_destroy(&adapter->client_lock);
        mutex_unlock(&adapter->crit_lock);
        mutex_destroy(&adapter->crit_lock);
-       mutex_unlock(&adapter->remove_lock);
-       mutex_destroy(&adapter->remove_lock);
 
        iounmap(hw->hw_addr);
        pci_release_regions(pdev);
index 5ee1d11..88844d6 100644 (file)
@@ -2146,29 +2146,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                                     sizeof(adapter->vlan_v2_caps)));
 
                iavf_process_config(adapter);
-
-               /* unlock crit_lock before acquiring rtnl_lock as other
-                * processes holding rtnl_lock could be waiting for the same
-                * crit_lock
-                */
-               mutex_unlock(&adapter->crit_lock);
-               /* VLAN capabilities can change during VFR, so make sure to
-                * update the netdev features with the new capabilities
-                */
-               rtnl_lock();
-               netdev_update_features(netdev);
-               rtnl_unlock();
-               if (iavf_lock_timeout(&adapter->crit_lock, 10000))
-                       dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
-                                __FUNCTION__);
-
-               /* Request VLAN offload settings */
-               if (VLAN_V2_ALLOWED(adapter))
-                       iavf_set_vlan_offload_features(adapter, 0,
-                                                      netdev->features);
-
-               iavf_set_queue_vlan_tag_loc(adapter);
-
+               adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
                }
                break;
        case VIRTCHNL_OP_ENABLE_QUEUES:
index 4e16d18..473b1f6 100644 (file)
@@ -280,7 +280,6 @@ enum ice_pf_state {
        ICE_VFLR_EVENT_PENDING,
        ICE_FLTR_OVERFLOW_PROMISC,
        ICE_VF_DIS,
-       ICE_VF_DEINIT_IN_PROGRESS,
        ICE_CFG_BUSY,
        ICE_SERVICE_SCHED,
        ICE_SERVICE_DIS,
@@ -483,6 +482,7 @@ enum ice_pf_flags {
        ICE_FLAG_VF_TRUE_PROMISC_ENA,
        ICE_FLAG_MDD_AUTO_RESET_VF,
        ICE_FLAG_LINK_LENIENT_MODE_ENA,
+       ICE_FLAG_PLUG_AUX_DEV,
        ICE_PF_FLAGS_NBITS              /* must be last */
 };
 
@@ -887,7 +887,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
        if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
                set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
                set_bit(ICE_FLAG_AUX_ENA, pf->flags);
-               ice_plug_aux_dev(pf);
+               set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
        }
 }
 
index 408d15a..e2af99a 100644 (file)
@@ -3340,9 +3340,10 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
 
        if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
            !ice_fw_supports_report_dflt_cfg(hw)) {
-               struct ice_link_default_override_tlv tlv;
+               struct ice_link_default_override_tlv tlv = { 0 };
 
-               if (ice_get_link_default_override(&tlv, pi))
+               status = ice_get_link_default_override(&tlv, pi);
+               if (status)
                        goto out;
 
                if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
index 864692b..73edc24 100644 (file)
@@ -44,6 +44,7 @@ ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
                                       ctrl_vsi->rxq_map[vf->vf_id];
        rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
        rule_info.flags_info.act_valid = true;
+       rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
 
        err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
                               vf->repr->mac_rule);
index e375ac8..4f954db 100644 (file)
@@ -204,17 +204,39 @@ ice_lag_unlink(struct ice_lag *lag,
                lag->upper_netdev = NULL;
        }
 
-       if (lag->peer_netdev) {
-               dev_put(lag->peer_netdev);
-               lag->peer_netdev = NULL;
-       }
-
+       lag->peer_netdev = NULL;
        ice_set_sriov_cap(pf);
        ice_set_rdma_cap(pf);
        lag->bonded = false;
        lag->role = ICE_LAG_NONE;
 }
 
+/**
+ * ice_lag_unregister - handle netdev unregister events
+ * @lag: LAG info struct
+ * @netdev: netdev reporting the event
+ */
+static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
+{
+       struct ice_pf *pf = lag->pf;
+
+       /* check to see if this event is for this netdev
+        * check that we are in an aggregate
+        */
+       if (netdev != lag->netdev || !lag->bonded)
+               return;
+
+       if (lag->upper_netdev) {
+               dev_put(lag->upper_netdev);
+               lag->upper_netdev = NULL;
+               ice_set_sriov_cap(pf);
+               ice_set_rdma_cap(pf);
+       }
+       /* perform some cleanup in case we come back */
+       lag->bonded = false;
+       lag->role = ICE_LAG_NONE;
+}
+
 /**
  * ice_lag_changeupper_event - handle LAG changeupper event
  * @lag: LAG info struct
@@ -307,7 +329,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
                ice_lag_info_event(lag, ptr);
                break;
        case NETDEV_UNREGISTER:
-               ice_lag_unlink(lag, ptr);
+               ice_lag_unregister(lag, netdev);
                break;
        default:
                break;
index d981dc6..85a6128 100644 (file)
@@ -568,6 +568,7 @@ struct ice_tx_ctx_desc {
                        (0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
 
 #define ICE_TXD_CTX_QW1_MSS_S  50
+#define ICE_TXD_CTX_MIN_MSS    64
 
 #define ICE_TXD_CTX_QW1_VSI_S  50
 #define ICE_TXD_CTX_QW1_VSI_M  (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
index 0c187cf..53256ac 100644 (file)
@@ -1684,6 +1684,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
        if (status)
                dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
                        vsi_num, status);
+
+       status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
+                                ICE_FLOW_SEG_HDR_ESP);
+       if (status)
+               dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
+                       vsi_num, status);
 }
 
 /**
index 3081443..f3c346e 100644 (file)
@@ -1799,7 +1799,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
                                 * reset, so print the event prior to reset.
                                 */
                                ice_print_vf_rx_mdd_event(vf);
+                               mutex_lock(&pf->vf[i].cfg_lock);
                                ice_reset_vf(&pf->vf[i], false);
+                               mutex_unlock(&pf->vf[i].cfg_lock);
                        }
                }
        }
@@ -2253,6 +2255,9 @@ static void ice_service_task(struct work_struct *work)
                return;
        }
 
+       if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+               ice_plug_aux_dev(pf);
+
        ice_clean_adminq_subtask(pf);
        ice_check_media_subtask(pf);
        ice_check_for_hang_subtask(pf);
@@ -8525,6 +8530,7 @@ ice_features_check(struct sk_buff *skb,
                   struct net_device __always_unused *netdev,
                   netdev_features_t features)
 {
+       bool gso = skb_is_gso(skb);
        size_t len;
 
        /* No point in doing any of this if neither checksum nor GSO are
@@ -8537,24 +8543,32 @@ ice_features_check(struct sk_buff *skb,
        /* We cannot support GSO if the MSS is going to be less than
         * 64 bytes. If it is then we need to drop support for GSO.
         */
-       if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+       if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
                features &= ~NETIF_F_GSO_MASK;
 
-       len = skb_network_header(skb) - skb->data;
+       len = skb_network_offset(skb);
        if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
                goto out_rm_features;
 
-       len = skb_transport_header(skb) - skb_network_header(skb);
+       len = skb_network_header_len(skb);
        if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
                goto out_rm_features;
 
        if (skb->encapsulation) {
-               len = skb_inner_network_header(skb) - skb_transport_header(skb);
-               if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
-                       goto out_rm_features;
+               /* this must work for VXLAN frames AND IPIP/SIT frames, and in
+                * the case of IPIP frames, the transport header pointer is
+                * after the inner header! So check to make sure that this
+                * is a GRE or UDP_TUNNEL frame before doing that math.
+                */
+               if (gso && (skb_shinfo(skb)->gso_type &
+                           (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
+                       len = skb_inner_network_header(skb) -
+                             skb_transport_header(skb);
+                       if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
+                               goto out_rm_features;
+               }
 
-               len = skb_inner_transport_header(skb) -
-                     skb_inner_network_header(skb);
+               len = skb_inner_network_header_len(skb);
                if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
                        goto out_rm_features;
        }
index dc1b0e9..695b6dd 100644 (file)
@@ -47,6 +47,7 @@ enum ice_protocol_type {
 
 enum ice_sw_tunnel_type {
        ICE_NON_TUN = 0,
+       ICE_SW_TUN_AND_NON_TUN,
        ICE_SW_TUN_VXLAN,
        ICE_SW_TUN_GENEVE,
        ICE_SW_TUN_NVGRE,
index ae291d4..000c39d 100644 (file)
@@ -1533,9 +1533,12 @@ exit:
 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
 {
        struct timespec64 now, then;
+       int ret;
 
        then = ns_to_timespec64(delta);
-       ice_ptp_gettimex64(info, &now, NULL);
+       ret = ice_ptp_gettimex64(info, &now, NULL);
+       if (ret)
+               return ret;
        now = timespec64_add(now, then);
 
        return ice_ptp_settime64(info, (const struct timespec64 *)&now);
index 11ae0be..475ec2a 100644 (file)
@@ -4537,6 +4537,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
        case ICE_SW_TUN_NVGRE:
                prof_type = ICE_PROF_TUN_GRE;
                break;
+       case ICE_SW_TUN_AND_NON_TUN:
        default:
                prof_type = ICE_PROF_ALL;
                break;
@@ -5305,7 +5306,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
        if (status)
                goto err_ice_add_adv_rule;
 
-       if (rinfo->tun_type != ICE_NON_TUN) {
+       if (rinfo->tun_type != ICE_NON_TUN &&
+           rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
                status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
                                                 s_rule->pdata.lkup_tx_rx.hdr,
                                                 pkt_offsets);
index e8aab66..65cf32e 100644 (file)
@@ -709,7 +709,7 @@ ice_tc_set_port(struct flow_match_ports match,
                        fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
                else
                        fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
-               fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+
                headers->l4_key.dst_port = match.key->dst;
                headers->l4_mask.dst_port = match.mask->dst;
        }
@@ -718,7 +718,7 @@ ice_tc_set_port(struct flow_match_ports match,
                        fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
                else
                        fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
-               fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+
                headers->l4_key.src_port = match.key->src;
                headers->l4_mask.src_port = match.mask->src;
        }
index 39b8012..408f78e 100644 (file)
@@ -500,8 +500,6 @@ void ice_free_vfs(struct ice_pf *pf)
        struct ice_hw *hw = &pf->hw;
        unsigned int tmp, i;
 
-       set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
-
        if (!pf->vf)
                return;
 
@@ -519,22 +517,26 @@ void ice_free_vfs(struct ice_pf *pf)
        else
                dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
 
-       /* Avoid wait time by stopping all VFs at the same time */
-       ice_for_each_vf(pf, i)
-               ice_dis_vf_qs(&pf->vf[i]);
-
        tmp = pf->num_alloc_vfs;
        pf->num_qps_per_vf = 0;
        pf->num_alloc_vfs = 0;
        for (i = 0; i < tmp; i++) {
-               if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
+               struct ice_vf *vf = &pf->vf[i];
+
+               mutex_lock(&vf->cfg_lock);
+
+               ice_dis_vf_qs(vf);
+
+               if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
                        /* disable VF qp mappings and set VF disable state */
-                       ice_dis_vf_mappings(&pf->vf[i]);
-                       set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
-                       ice_free_vf_res(&pf->vf[i]);
+                       ice_dis_vf_mappings(vf);
+                       set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+                       ice_free_vf_res(vf);
                }
 
-               mutex_destroy(&pf->vf[i].cfg_lock);
+               mutex_unlock(&vf->cfg_lock);
+
+               mutex_destroy(&vf->cfg_lock);
        }
 
        if (ice_sriov_free_msix_res(pf))
@@ -570,7 +572,6 @@ void ice_free_vfs(struct ice_pf *pf)
                                i);
 
        clear_bit(ICE_VF_DIS, pf->state);
-       clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
        clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
 }
 
@@ -1498,6 +1499,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
        ice_for_each_vf(pf, v) {
                vf = &pf->vf[v];
 
+               mutex_lock(&vf->cfg_lock);
+
                vf->driver_caps = 0;
                ice_vc_set_default_allowlist(vf);
 
@@ -1512,6 +1515,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
                ice_vf_pre_vsi_rebuild(vf);
                ice_vf_rebuild_vsi(vf);
                ice_vf_post_vsi_rebuild(vf);
+
+               mutex_unlock(&vf->cfg_lock);
        }
 
        if (ice_is_eswitch_mode_switchdev(pf))
@@ -1562,6 +1567,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
        u32 reg;
        int i;
 
+       lockdep_assert_held(&vf->cfg_lock);
+
        dev = ice_pf_to_dev(pf);
 
        if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
@@ -2061,9 +2068,12 @@ void ice_process_vflr_event(struct ice_pf *pf)
                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
                /* read GLGEN_VFLRSTAT register to find out the flr VFs */
                reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
-               if (reg & BIT(bit_idx))
+               if (reg & BIT(bit_idx)) {
                        /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+                       mutex_lock(&vf->cfg_lock);
                        ice_reset_vf(vf, true);
+                       mutex_unlock(&vf->cfg_lock);
+               }
        }
 }
 
@@ -2140,7 +2150,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
        if (!vf)
                return;
 
+       mutex_lock(&vf->cfg_lock);
        ice_vc_reset_vf(vf);
+       mutex_unlock(&vf->cfg_lock);
 }
 
 /**
@@ -4625,10 +4637,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
        struct device *dev;
        int err = 0;
 
-       /* if de-init is underway, don't process messages from VF */
-       if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
-               return;
-
        dev = ice_pf_to_dev(pf);
        if (ice_validate_vf_id(pf, vf_id)) {
                err = -EINVAL;
index 5cad31c..40dbf4b 100644 (file)
@@ -746,8 +746,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
                if (ret_val)
                        return ret_val;
                ret_val = igc_write_phy_reg_mdic(hw, offset, data);
-               if (ret_val)
-                       return ret_val;
                hw->phy.ops.release(hw);
        } else {
                ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
@@ -779,8 +777,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
                if (ret_val)
                        return ret_val;
                ret_val = igc_read_phy_reg_mdic(hw, offset, data);
-               if (ret_val)
-                       return ret_val;
                hw->phy.ops.release(hw);
        } else {
                ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
index b3fd8e5..6a5e9cf 100644 (file)
@@ -390,12 +390,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
        u32 cmd_type;
 
        while (budget-- > 0) {
-               if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
-                   !netif_carrier_ok(xdp_ring->netdev)) {
+               if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
                        work_done = false;
                        break;
                }
 
+               if (!netif_carrier_ok(xdp_ring->netdev))
+                       break;
+
                if (!xsk_tx_peek_desc(pool, &desc))
                        break;
 
index 0015fcf..0f293ac 100644 (file)
@@ -1984,14 +1984,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
        if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
                return;
 
-       set_ring_build_skb_enabled(rx_ring);
+       if (PAGE_SIZE < 8192)
+               if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
+                       set_ring_uses_large_buffer(rx_ring);
 
-       if (PAGE_SIZE < 8192) {
-               if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
-                       return;
+       /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
+       if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
+               return;
 
-               set_ring_uses_large_buffer(rx_ring);
-       }
+       set_ring_build_skb_enabled(rx_ring);
 }
 
 /**
index f99adbf..04345b9 100644 (file)
@@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
 
 config LITEX_LITEETH
        tristate "LiteX Ethernet support"
-       depends on OF
+       depends on OF && HAS_IOMEM
        help
          If you wish to compile a kernel for hardware with a LiteX LiteEth
          device then you should answer Y to this.
index 1052475..143ca8b 100644 (file)
@@ -2704,6 +2704,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
 
 static struct platform_device *port_platdev[3];
 
+static void mv643xx_eth_shared_of_remove(void)
+{
+       int n;
+
+       for (n = 0; n < 3; n++) {
+               platform_device_del(port_platdev[n]);
+               port_platdev[n] = NULL;
+       }
+}
+
 static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
                                          struct device_node *pnp)
 {
@@ -2740,7 +2750,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
                return -EINVAL;
        }
 
-       of_get_mac_address(pnp, ppd.mac_addr);
+       ret = of_get_mac_address(pnp, ppd.mac_addr);
+       if (ret)
+               return ret;
 
        mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
        mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2804,21 +2816,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
                ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
                if (ret) {
                        of_node_put(pnp);
+                       mv643xx_eth_shared_of_remove();
                        return ret;
                }
        }
        return 0;
 }
 
-static void mv643xx_eth_shared_of_remove(void)
-{
-       int n;
-
-       for (n = 0; n < 3; n++) {
-               platform_device_del(port_platdev[n]);
-               port_platdev[n] = NULL;
-       }
-}
 #else
 static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
 {
index 7cdbf8b..1a835b4 100644 (file)
@@ -6870,6 +6870,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
        dev->dev.of_node = port_node;
 
+       port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
+       port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
+
        if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
                port->phylink_config.dev = &dev->dev;
                port->phylink_config.type = PHYLINK_NETDEV;
@@ -6940,9 +6943,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
                                  port->phylink_config.supported_interfaces);
                }
 
-               port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
-               port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
-
                phylink = phylink_create(&port->phylink_config, port_fwnode,
                                         phy_mode, &mvpp2_phylink_ops);
                if (IS_ERR(phylink)) {
index 26efa33..9cc844b 100644 (file)
@@ -16,11 +16,13 @@ struct mlx5e_tc_act_parse_state {
        unsigned int num_actions;
        struct mlx5e_tc_flow *flow;
        struct netlink_ext_ack *extack;
+       bool ct_clear;
        bool encap;
        bool decap;
        bool mpls_push;
        bool ptype_host;
        const struct ip_tunnel_info *tun_info;
+       struct mlx5e_mpls_info mpls_info;
        struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
        int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
        int if_count;
index 06ec30c..58cc33f 100644 (file)
@@ -27,8 +27,13 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
                struct mlx5e_priv *priv,
                struct mlx5_flow_attr *attr)
 {
+       bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
        int err;
 
+       /* It's redundant to do ct clear more than once. */
+       if (clear_action && parse_state->ct_clear)
+               return 0;
+
        err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
                                      &attr->parse_attr->mod_hdr_acts,
                                      act, parse_state->extack);
@@ -40,6 +45,8 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
        if (mlx5e_is_eswitch_flow(parse_state->flow))
                attr->esw_attr->split_count = attr->esw_attr->out_count;
 
+       parse_state->ct_clear = clear_action;
+
        return 0;
 }
 
index c614fc7..2e615e0 100644 (file)
@@ -177,6 +177,12 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
                return -ENOMEM;
 
        parse_state->encap = false;
+
+       if (parse_state->mpls_push) {
+               memcpy(&parse_attr->mpls_info[esw_attr->out_count],
+                      &parse_state->mpls_info, sizeof(parse_state->mpls_info));
+               parse_state->mpls_push = false;
+       }
        esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
        esw_attr->out_count++;
        /* attr->dests[].rep is resolved when we handle encap */
index 784fc4f..89ca88c 100644 (file)
@@ -22,6 +22,16 @@ tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
        return true;
 }
 
+static void
+copy_mpls_info(struct mlx5e_mpls_info *mpls_info,
+              const struct flow_action_entry *act)
+{
+       mpls_info->label = act->mpls_push.label;
+       mpls_info->tc = act->mpls_push.tc;
+       mpls_info->bos = act->mpls_push.bos;
+       mpls_info->ttl = act->mpls_push.ttl;
+}
+
 static int
 tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
                       const struct flow_action_entry *act,
@@ -29,6 +39,7 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
                       struct mlx5_flow_attr *attr)
 {
        parse_state->mpls_push = true;
+       copy_mpls_info(&parse_state->mpls_info, act);
 
        return 0;
 }
index f832c26..70b40ae 100644 (file)
@@ -35,6 +35,7 @@ enum {
 
 struct mlx5e_tc_flow_parse_attr {
        const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+       struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
        struct net_device *filter_dev;
        struct mlx5_flow_spec spec;
        struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
index 9918ed8..d39d0da 100644 (file)
@@ -750,6 +750,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct mlx5_flow_attr *attr = flow->attr;
        const struct ip_tunnel_info *tun_info;
+       const struct mlx5e_mpls_info *mpls_info;
        unsigned long tbl_time_before = 0;
        struct mlx5e_encap_entry *e;
        struct mlx5e_encap_key key;
@@ -760,6 +761,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
 
        parse_attr = attr->parse_attr;
        tun_info = parse_attr->tun_info[out_index];
+       mpls_info = &parse_attr->mpls_info[out_index];
        family = ip_tunnel_info_af(tun_info);
        key.ip_tun_key = &tun_info->key;
        key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
@@ -810,6 +812,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
                goto out_err_init;
        }
        e->tun_info = tun_info;
+       memcpy(&e->mpls_info, mpls_info, sizeof(*mpls_info));
        err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
        if (err)
                goto out_err_init;
index 60952b3..c5b1617 100644 (file)
@@ -30,16 +30,15 @@ static int generate_ip_tun_hdr(char buf[],
                               struct mlx5e_encap_entry *r)
 {
        const struct ip_tunnel_key *tun_key = &r->tun_info->key;
+       const struct mlx5e_mpls_info *mpls_info = &r->mpls_info;
        struct udphdr *udp = (struct udphdr *)(buf);
        struct mpls_shim_hdr *mpls;
-       u32 tun_id;
 
-       tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id));
        mpls = (struct mpls_shim_hdr *)(udp + 1);
        *ip_proto = IPPROTO_UDP;
 
        udp->dest = tun_key->tp_dst;
-       *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true);
+       *mpls = mpls_entry_encode(mpls_info->label, mpls_info->ttl, mpls_info->tc, mpls_info->bos);
 
        return 0;
 }
@@ -60,37 +59,31 @@ static int parse_tunnel(struct mlx5e_priv *priv,
                        void *headers_v)
 {
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
-       struct flow_match_enc_keyid enc_keyid;
        struct flow_match_mpls match;
        void *misc2_c;
        void *misc2_v;
 
-       misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-                              misc_parameters_2);
-       misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-                              misc_parameters_2);
-
-       if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
-               return 0;
-
-       if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
-               return 0;
-
-       flow_rule_match_enc_keyid(rule, &enc_keyid);
-
-       if (!enc_keyid.mask->keyid)
-               return 0;
-
        if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
            !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
                return -EOPNOTSUPP;
 
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+               return -EOPNOTSUPP;
+
+       if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
+               return 0;
+
        flow_rule_match_mpls(rule, &match);
 
        /* Only support matching the first LSE */
        if (match.mask->used_lses != 1)
                return -EOPNOTSUPP;
 
+       misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                              misc_parameters_2);
+       misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                              misc_parameters_2);
+
        MLX5_SET(fte_match_set_misc2, misc2_c,
                 outer_first_mpls_over_udp.mpls_label,
                 match.mask->ls[0].mpls_label);
index 57d755d..6e80585 100644 (file)
@@ -1792,7 +1792,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
                if (size_read < 0) {
                        netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
                                   __func__, size_read);
-                       return 0;
+                       return size_read;
                }
 
                i += size_read;
index b01dacb..b3f7520 100644 (file)
@@ -183,6 +183,13 @@ struct mlx5e_decap_entry {
        struct rcu_head rcu;
 };
 
+struct mlx5e_mpls_info {
+       u32             label;
+       u8              tc;
+       u8              bos;
+       u8              ttl;
+};
+
 struct mlx5e_encap_entry {
        /* attached neigh hash entry */
        struct mlx5e_neigh_hash_entry *nhe;
@@ -196,6 +203,7 @@ struct mlx5e_encap_entry {
        struct list_head route_list;
        struct mlx5_pkt_reformat *pkt_reformat;
        const struct ip_tunnel_info *tun_info;
+       struct mlx5e_mpls_info mpls_info;
        unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
 
        struct net_device *out_dev;
index ee0a8f5..6530d7b 100644 (file)
@@ -1349,7 +1349,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
        }
 
        /* True when explicitly set via priv flag, or XDP prog is loaded */
-       if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
+       if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
+           get_cqe_tls_offload(cqe))
                goto csum_unnecessary;
 
        /* CQE csum doesn't cover padding octets in short ethernet
index 8c9163d..08a7565 100644 (file)
@@ -334,6 +334,7 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
                netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
                buf[count] = st.st_func(priv);
                netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
+               count++;
        }
 
        mutex_unlock(&priv->state_lock);
index 26e326f..00f1d16 100644 (file)
@@ -1254,9 +1254,6 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 
-       if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
-               return;
-
        MLX5_SET(ppcnt_reg, in, local_port, 1);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
        if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
@@ -1272,6 +1269,9 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
 void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
                         struct ethtool_fec_stats *fec_stats)
 {
+       if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+               return;
+
        fec_set_corrected_bits_total(priv, fec_stats);
        fec_set_block_stats(priv, fec_stats);
 }
index 2022fa4..b27532a 100644 (file)
@@ -3204,6 +3204,18 @@ actions_match_supported(struct mlx5e_priv *priv,
                return false;
        }
 
+       if (!(~actions &
+             (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+               NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+               return false;
+       }
+
+       if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+           actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+               NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+               return false;
+       }
+
        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
            !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
                                           actions, ct_flow, ct_clear, extack))
index 11bbcd5..694c540 100644 (file)
@@ -697,7 +697,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
 }
 
 int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
-                               u32 min_rate, u32 max_rate)
+                               u32 max_rate, u32 min_rate)
 {
        int err;
 
index 9a7b256..cfcd72b 100644 (file)
@@ -2838,10 +2838,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
        if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
                return false;
 
-       if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
-           mlx5_ecpf_vport_exists(esw->dev))
-               return false;
-
        return true;
 }
 
index b628917..537c82b 100644 (file)
@@ -2074,6 +2074,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
                fte->node.del_hw_func = NULL;
                up_write_ref_node(&fte->node, false);
                tree_put_node(&fte->node, false);
+       } else {
+               up_write_ref_node(&fte->node, false);
        }
        kfree(handle);
 }
index df58cba..1e8ec4f 100644 (file)
@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
 
 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
 {
+       if (!mlx5_chains_prios_supported(chains))
+               return 1;
+
        if (mlx5_chains_ignore_flow_level_supported(chains))
                return UINT_MAX;
 
index 2c774f3..bba72b2 100644 (file)
@@ -526,7 +526,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
 
        /* Check log_max_qp from HCA caps to set in current profile */
        if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
-               prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+               prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
        } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
                mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
                               prof->log_max_qp,
@@ -1840,10 +1840,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},   /* ConnectX Family mlx5Gen Virtual Function */
        { PCI_VDEVICE(MELLANOX, 0x101f) },                      /* ConnectX-6 LX */
        { PCI_VDEVICE(MELLANOX, 0x1021) },                      /* ConnectX-7 */
+       { PCI_VDEVICE(MELLANOX, 0x1023) },                      /* ConnectX-8 */
        { PCI_VDEVICE(MELLANOX, 0xa2d2) },                      /* BlueField integrated ConnectX-5 network controller */
        { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},   /* BlueField integrated ConnectX-5 network controller VF */
        { PCI_VDEVICE(MELLANOX, 0xa2d6) },                      /* BlueField-2 integrated ConnectX-6 Dx network controller */
        { PCI_VDEVICE(MELLANOX, 0xa2dc) },                      /* BlueField-3 integrated ConnectX-7 network controller */
+       { PCI_VDEVICE(MELLANOX, 0xa2df) },                      /* BlueField-4 integrated ConnectX-8 network controller */
        { 0, }
 };
 
index 7f6fd9c..e289cfd 100644 (file)
@@ -4,7 +4,6 @@
 #include "dr_types.h"
 
 #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
-#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024)
 
 struct mlx5dr_icm_pool {
        enum mlx5dr_icm_type icm_type;
@@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
        kvfree(icm_mr);
 }
 
-static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
+static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
 {
-       chunk->ste_arr = kvzalloc(chunk->num_of_entries *
-                                 sizeof(chunk->ste_arr[0]), GFP_KERNEL);
-       if (!chunk->ste_arr)
-               return -ENOMEM;
-
-       chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
-                                    DR_STE_SIZE_REDUCED, GFP_KERNEL);
-       if (!chunk->hw_ste_arr)
-               goto out_free_ste_arr;
-
-       chunk->miss_list = kvmalloc(chunk->num_of_entries *
-                                   sizeof(chunk->miss_list[0]), GFP_KERNEL);
-       if (!chunk->miss_list)
-               goto out_free_hw_ste_arr;
+       /* We support only one type of STE size, both for ConnectX-5 and later
+        * devices. Once the support for match STE which has a larger tag is
+        * added (32B instead of 16B), the STE size for devices later than
+        * ConnectX-5 needs to account for that.
+        */
+       return DR_STE_SIZE_REDUCED;
+}
 
-       return 0;
+static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
+{
+       struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+       int index = offset / DR_STE_SIZE;
 
-out_free_hw_ste_arr:
-       kvfree(chunk->hw_ste_arr);
-out_free_ste_arr:
-       kvfree(chunk->ste_arr);
-       return -ENOMEM;
+       chunk->ste_arr = &buddy->ste_arr[index];
+       chunk->miss_list = &buddy->miss_list[index];
+       chunk->hw_ste_arr = buddy->hw_ste_arr +
+                           index * dr_icm_buddy_get_ste_size(buddy);
 }
 
 static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
 {
-       kvfree(chunk->miss_list);
-       kvfree(chunk->hw_ste_arr);
-       kvfree(chunk->ste_arr);
+       struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+
+       memset(chunk->hw_ste_arr, 0,
+              chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+       memset(chunk->ste_arr, 0,
+              chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
 }
 
 static enum mlx5dr_icm_type
@@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
        kvfree(chunk);
 }
 
+static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+       int num_of_entries =
+               mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
+
+       buddy->ste_arr = kvcalloc(num_of_entries,
+                                 sizeof(struct mlx5dr_ste), GFP_KERNEL);
+       if (!buddy->ste_arr)
+               return -ENOMEM;
+
+       /* Preallocate full STE size on non-ConnectX-5 devices since
+        * we need to support both full and reduced with the same cache.
+        */
+       buddy->hw_ste_arr = kvcalloc(num_of_entries,
+                                    dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
+       if (!buddy->hw_ste_arr)
+               goto free_ste_arr;
+
+       buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
+       if (!buddy->miss_list)
+               goto free_hw_ste_arr;
+
+       return 0;
+
+free_hw_ste_arr:
+       kvfree(buddy->hw_ste_arr);
+free_ste_arr:
+       kvfree(buddy->ste_arr);
+       return -ENOMEM;
+}
+
+static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+       kvfree(buddy->ste_arr);
+       kvfree(buddy->hw_ste_arr);
+       kvfree(buddy->miss_list);
+}
+
 static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
 {
        struct mlx5dr_icm_buddy_mem *buddy;
@@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
        buddy->icm_mr = icm_mr;
        buddy->pool = pool;
 
+       if (pool->icm_type == DR_ICM_TYPE_STE) {
+               /* Reduce allocations by preallocating and reusing the STE structures */
+               if (dr_icm_buddy_init_ste_cache(buddy))
+                       goto err_cleanup_buddy;
+       }
+
        /* add it to the -start- of the list in order to search in it first */
        list_add(&buddy->list_node, &pool->buddy_mem_list);
 
        return 0;
 
+err_cleanup_buddy:
+       mlx5dr_buddy_cleanup(buddy);
 err_free_buddy:
        kvfree(buddy);
 free_mr:
@@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
 
        mlx5dr_buddy_cleanup(buddy);
 
+       if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
+               dr_icm_buddy_cleanup_ste_cache(buddy);
+
        kvfree(buddy);
 }
 
@@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
        chunk->byte_size =
                mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
        chunk->seg = seg;
+       chunk->buddy_mem = buddy_mem_pool;
 
-       if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
-               mlx5dr_err(pool->dmn,
-                          "Failed to init ste arrays (order: %d)\n",
-                          chunk_size);
-               goto out_free_chunk;
-       }
+       if (pool->icm_type == DR_ICM_TYPE_STE)
+               dr_icm_chunk_ste_init(chunk, offset);
 
        buddy_mem_pool->used_memory += chunk->byte_size;
-       chunk->buddy_mem = buddy_mem_pool;
        INIT_LIST_HEAD(&chunk->chunk_list);
 
        /* chunk now is part of the used_list */
        list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
 
        return chunk;
-
-out_free_chunk:
-       kvfree(chunk);
-       return NULL;
 }
 
 static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
 {
-       if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL)
-               return true;
+       int allow_hot_size;
+
+       /* sync when hot memory reaches half of the pool size */
+       allow_hot_size =
+               mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+                                                  pool->icm_type) / 2;
 
-       return false;
+       return pool->hot_memory_size > allow_hot_size;
 }
 
 static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
index e87cf49..38971fe 100644 (file)
@@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
        return (spec->dmac_47_16 || spec->dmac_15_0);
 }
 
-static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
-{
-       return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
-               spec->src_ip_63_32 || spec->src_ip_31_0);
-}
-
-static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
-{
-       return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
-               spec->dst_ip_63_32 || spec->dst_ip_31_0);
-}
-
 static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
 {
        return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
@@ -503,11 +491,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
                                                    &mask, inner, rx);
 
                if (outer_ipv == DR_RULE_IPV6) {
-                       if (dr_mask_is_dst_addr_set(&mask.outer))
+                       if (DR_MASK_IS_DST_IP_SET(&mask.outer))
                                mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
                                                                 &mask, inner, rx);
 
-                       if (dr_mask_is_src_addr_set(&mask.outer))
+                       if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
                                mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
                                                                 &mask, inner, rx);
 
@@ -610,11 +598,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
                                                    &mask, inner, rx);
 
                if (inner_ipv == DR_RULE_IPV6) {
-                       if (dr_mask_is_dst_addr_set(&mask.inner))
+                       if (DR_MASK_IS_DST_IP_SET(&mask.inner))
                                mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
                                                                 &mask, inner, rx);
 
-                       if (dr_mask_is_src_addr_set(&mask.inner))
+                       if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
                                mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
                                                                 &mask, inner, rx);
 
index 7e61742..187e29b 100644 (file)
@@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
                                                 used_hw_action_num);
 }
 
+static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
+                                      struct mlx5dr_match_spec *spec)
+{
+       if (spec->ip_version) {
+               if (spec->ip_version != 0xf) {
+                       mlx5dr_err(dmn,
+                                  "Partial ip_version mask with src/dst IP is not supported\n");
+                       return -EINVAL;
+               }
+       } else if (spec->ethertype != 0xffff &&
+                  (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
+               mlx5dr_err(dmn,
+                          "Partial/no ethertype mask with src/dst IP is not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
                               u8 match_criteria,
                               struct mlx5dr_match_param *mask,
                               struct mlx5dr_match_param *value)
 {
-       if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
+       if (value)
+               return 0;
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
                if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
                        mlx5dr_err(dmn,
                                   "Partial mask source_port is not supported\n");
@@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
                }
        }
 
+       if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
+           dr_ste_build_pre_check_spec(dmn, &mask->outer))
+               return -EINVAL;
+
+       if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
+           dr_ste_build_pre_check_spec(dmn, &mask->inner))
+               return -EINVAL;
+
        return 0;
 }
 
index 1b3d484..55fcb75 100644 (file)
@@ -798,6 +798,16 @@ struct mlx5dr_match_param {
                                       (_misc3)->icmpv4_code || \
                                       (_misc3)->icmpv4_header_data)
 
+#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
+                                     (_spec)->src_ip_95_64  || \
+                                     (_spec)->src_ip_63_32  || \
+                                     (_spec)->src_ip_31_0)
+
+#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
+                                     (_spec)->dst_ip_95_64  || \
+                                     (_spec)->dst_ip_63_32  || \
+                                     (_spec)->dst_ip_31_0)
+
 struct mlx5dr_esw_caps {
        u64 drop_icm_address_rx;
        u64 drop_icm_address_tx;
index a476da2..3f31146 100644 (file)
@@ -233,7 +233,11 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
                dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
 }
 
-#define MLX5_FLOW_CONTEXT_ACTION_MAX  32
+/* We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX  34
 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                                  struct mlx5_flow_table *ft,
                                  struct mlx5_flow_group *group,
@@ -403,9 +407,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                        enum mlx5_flow_destination_type type = dst->dest_attr.type;
                        u32 id;
 
-                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
-                           num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
-                               err = -ENOSPC;
+                       if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+                           num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                               err = -EOPNOTSUPP;
                                goto free_actions;
                        }
 
@@ -478,8 +482,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                            MLX5_FLOW_DESTINATION_TYPE_COUNTER)
                                continue;
 
-                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
-                               err = -ENOSPC;
+                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+                           fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                               err = -EOPNOTSUPP;
                                goto free_actions;
                        }
 
@@ -499,14 +504,28 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
        params.match_sz = match_sz;
        params.match_buf = (u64 *)fte->val;
        if (num_term_actions == 1) {
-               if (term_actions->reformat)
+               if (term_actions->reformat) {
+                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                               err = -EOPNOTSUPP;
+                               goto free_actions;
+                       }
                        actions[num_actions++] = term_actions->reformat;
+               }
 
+               if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                       err = -EOPNOTSUPP;
+                       goto free_actions;
+               }
                actions[num_actions++] = term_actions->dest;
        } else if (num_term_actions > 1) {
                bool ignore_flow_level =
                        !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
 
+               if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+                   fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                       err = -EOPNOTSUPP;
+                       goto free_actions;
+               }
                tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
                                                                term_actions,
                                                                num_term_actions,
index c7c9313..dfa2234 100644 (file)
@@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem {
         * sync_ste command sets them free.
         */
        struct list_head        hot_list;
+
+       /* Memory optimisation */
+       struct mlx5dr_ste       *ste_arr;
+       struct list_head        *miss_list;
+       u8                      *hw_ste_arr;
 };
 
 int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
index 59783fc..10b866e 100644 (file)
@@ -1103,7 +1103,7 @@ void sparx5_get_stats64(struct net_device *ndev,
        stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
        stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
        stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
-       for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++stats)
+       for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
                stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
                                               + idx];
        stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
index a1acc9b..d40e18c 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/phylink.h>
 #include <linux/hrtimer.h>
 
+#include "sparx5_main_regs.h"
+
 /* Target chip type */
 enum spx5_target_chiptype {
        SPX5_TARGET_CT_7546    = 0x7546,  /* SparX-5-64  Enterprise */
index 4ce490a..8e56ffa 100644 (file)
@@ -58,16 +58,6 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
        struct sparx5 *sparx5 = port->sparx5;
        int ret;
 
-       /* Make the port a member of the VLAN */
-       set_bit(port->portno, sparx5->vlan_mask[vid]);
-       ret = sparx5_vlant_set_mask(sparx5, vid);
-       if (ret)
-               return ret;
-
-       /* Default ingress vlan classification */
-       if (pvid)
-               port->pvid = vid;
-
        /* Untagged egress vlan classification */
        if (untagged && port->vid != vid) {
                if (port->vid) {
@@ -79,6 +69,16 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
                port->vid = vid;
        }
 
+       /* Make the port a member of the VLAN */
+       set_bit(port->portno, sparx5->vlan_mask[vid]);
+       ret = sparx5_vlant_set_mask(sparx5, vid);
+       if (ret)
+               return ret;
+
+       /* Default ingress vlan classification */
+       if (pvid)
+               port->pvid = vid;
+
        sparx5_vlan_port_apply(sparx5, port);
 
        return 0;
index 455293a..fd3ceb7 100644 (file)
@@ -549,14 +549,18 @@ EXPORT_SYMBOL(ocelot_vlan_add);
 int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       bool del_pvid = false;
        int err;
 
+       if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+               del_pvid = true;
+
        err = ocelot_vlan_member_del(ocelot, port, vid);
        if (err)
                return err;
 
        /* Ingress */
-       if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+       if (del_pvid)
                ocelot_port_set_pvid(ocelot, port, NULL);
 
        /* Egress */
@@ -1432,6 +1436,8 @@ static void
 ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
 {
        trap->key_type = OCELOT_VCAP_KEY_IPV4;
+       trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+       trap->key.ipv4.proto.mask[0] = 0xff;
        trap->key.ipv4.dport.value = PTP_EV_PORT;
        trap->key.ipv4.dport.mask = 0xffff;
 }
@@ -1440,6 +1446,8 @@ static void
 ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
 {
        trap->key_type = OCELOT_VCAP_KEY_IPV6;
+       trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+       trap->key.ipv4.proto.mask[0] = 0xff;
        trap->key.ipv6.dport.value = PTP_EV_PORT;
        trap->key.ipv6.dport.mask = 0xffff;
 }
@@ -1448,6 +1456,8 @@ static void
 ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
 {
        trap->key_type = OCELOT_VCAP_KEY_IPV4;
+       trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+       trap->key.ipv4.proto.mask[0] = 0xff;
        trap->key.ipv4.dport.value = PTP_GEN_PORT;
        trap->key.ipv4.dport.mask = 0xffff;
 }
@@ -1456,6 +1466,8 @@ static void
 ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
 {
        trap->key_type = OCELOT_VCAP_KEY_IPV6;
+       trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+       trap->key.ipv4.proto.mask[0] = 0xff;
        trap->key.ipv6.dport.value = PTP_GEN_PORT;
        trap->key.ipv6.dport.mask = 0xffff;
 }
@@ -1737,12 +1749,11 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
 }
 EXPORT_SYMBOL(ocelot_get_strings);
 
+/* Caller must hold &ocelot->stats_lock */
 static void ocelot_update_stats(struct ocelot *ocelot)
 {
        int i, j;
 
-       mutex_lock(&ocelot->stats_lock);
-
        for (i = 0; i < ocelot->num_phys_ports; i++) {
                /* Configure the port to read the stats from */
                ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
@@ -1761,8 +1772,6 @@ static void ocelot_update_stats(struct ocelot *ocelot)
                                              ~(u64)U32_MAX) + val;
                }
        }
-
-       mutex_unlock(&ocelot->stats_lock);
 }
 
 static void ocelot_check_stats_work(struct work_struct *work)
@@ -1771,7 +1780,9 @@ static void ocelot_check_stats_work(struct work_struct *work)
        struct ocelot *ocelot = container_of(del_work, struct ocelot,
                                             stats_work);
 
+       mutex_lock(&ocelot->stats_lock);
        ocelot_update_stats(ocelot);
+       mutex_unlock(&ocelot->stats_lock);
 
        queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
                           OCELOT_STATS_CHECK_DELAY);
@@ -1781,12 +1792,16 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
 {
        int i;
 
+       mutex_lock(&ocelot->stats_lock);
+
        /* check and update now */
        ocelot_update_stats(ocelot);
 
        /* Copy all counters */
        for (i = 0; i < ocelot->num_stats; i++)
                *data++ = ocelot->stats[port * ocelot->num_stats + i];
+
+       mutex_unlock(&ocelot->stats_lock);
 }
 EXPORT_SYMBOL(ocelot_get_ethtool_stats);
 
index 784292b..1543e47 100644 (file)
@@ -723,6 +723,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
                return true;
        if (netif_is_gretap(netdev))
                return true;
+       if (netif_is_ip6gretap(netdev))
+               return true;
 
        return false;
 }
index dfb4468..cb43651 100644 (file)
@@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
                          int port, bool mod)
 {
        struct nfp_flower_priv *priv = app->priv;
-       int ida_idx = NFP_MAX_MAC_INDEX, err;
        struct nfp_tun_offloaded_mac *entry;
+       int ida_idx = -1, err;
        u16 nfp_mac_idx = 0;
 
        entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
@@ -997,7 +997,7 @@ err_remove_hash:
 err_free_entry:
        kfree(entry);
 err_free_ida:
-       if (ida_idx != NFP_MAX_MAC_INDEX)
+       if (ida_idx != -1)
                ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
 
        return err;
@@ -1011,6 +1011,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_tun_offloaded_mac *entry;
        struct nfp_repr *repr;
+       u16 nfp_mac_idx;
        int ida_idx;
 
        entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
@@ -1029,8 +1030,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
                entry->bridge_count--;
 
                if (!entry->bridge_count && entry->ref_count) {
-                       u16 nfp_mac_idx;
-
                        nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
                        if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
                                                     false)) {
@@ -1046,7 +1045,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
 
        /* If MAC is now used by 1 repr set the offloaded MAC index to port. */
        if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
-               u16 nfp_mac_idx;
                int port, err;
 
                repr_priv = list_first_entry(&entry->repr_list,
@@ -1074,8 +1072,14 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
                                            &entry->ht_node,
                                            offloaded_macs_params));
+
+       if (nfp_flower_is_supported_bridge(netdev))
+               nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+       else
+               nfp_mac_idx = entry->index;
+
        /* If MAC has global ID then extract and free the ida entry. */
-       if (nfp_tunnel_is_mac_idx_global(entry->index)) {
+       if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
                ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
                ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
        }
index 32161a5..2881f5b 100644 (file)
@@ -2285,18 +2285,18 @@ static int __init sxgbe_cmdline_opt(char *str)
        char *opt;
 
        if (!str || !*str)
-               return -EINVAL;
+               return 1;
        while ((opt = strsep(&str, ",")) != NULL) {
                if (!strncmp(opt, "eee_timer:", 10)) {
                        if (kstrtoint(opt + 10, 0, &eee_timer))
                                goto err;
                }
        }
-       return 0;
+       return 1;
 
 err:
        pr_err("%s: ERROR broken module parameter conversion\n", __func__);
-       return -EINVAL;
+       return 1;
 }
 
 __setup("sxgbeeth=", sxgbe_cmdline_opt);
index be6bfd6..50baf62 100644 (file)
@@ -163,9 +163,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
        /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
        spin_lock_bh(&mcdi->iface_lock);
        ++mcdi->seqno;
+       seqno = mcdi->seqno & SEQ_MASK;
        spin_unlock_bh(&mcdi->iface_lock);
 
-       seqno = mcdi->seqno & SEQ_MASK;
        xflags = 0;
        if (mcdi->mode == MCDI_MODE_EVENTS)
                xflags |= MCDI_HEADER_XFLAGS_EVREQ;
index bde76ea..422e322 100644 (file)
@@ -2262,6 +2262,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
        stmmac_stop_tx(priv, priv->ioaddr, chan);
 }
 
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+       u32 rx_channels_count = priv->plat->rx_queues_to_use;
+       u32 tx_channels_count = priv->plat->tx_queues_to_use;
+       u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+       u32 chan;
+
+       for (chan = 0; chan < dma_csr_ch; chan++) {
+               struct stmmac_channel *ch = &priv->channel[chan];
+               unsigned long flags;
+
+               spin_lock_irqsave(&ch->lock, flags);
+               stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+               spin_unlock_irqrestore(&ch->lock, flags);
+       }
+}
+
 /**
  * stmmac_start_all_dma - start all RX and TX DMA channels
  * @priv: driver private structure
@@ -2904,8 +2921,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
                stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
 
        /* DMA CSR Channel configuration */
-       for (chan = 0; chan < dma_csr_ch; chan++)
+       for (chan = 0; chan < dma_csr_ch; chan++) {
                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+               stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+       }
 
        /* DMA RX Channel Configuration */
        for (chan = 0; chan < rx_channels_count; chan++) {
@@ -3761,6 +3780,7 @@ static int stmmac_open(struct net_device *dev)
 
        stmmac_enable_all_queues(priv);
        netif_tx_start_all_queues(priv->dev);
+       stmmac_enable_all_dma_irq(priv);
 
        return 0;
 
@@ -6510,8 +6530,10 @@ int stmmac_xdp_open(struct net_device *dev)
        }
 
        /* DMA CSR Channel configuration */
-       for (chan = 0; chan < dma_csr_ch; chan++)
+       for (chan = 0; chan < dma_csr_ch; chan++) {
                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+               stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+       }
 
        /* Adjust Split header */
        sph_en = (priv->hw->rx_csum > 0) && priv->sph;
@@ -6572,6 +6594,7 @@ int stmmac_xdp_open(struct net_device *dev)
        stmmac_enable_all_queues(priv);
        netif_carrier_on(dev);
        netif_tx_start_all_queues(dev);
+       stmmac_enable_all_dma_irq(priv);
 
        return 0;
 
@@ -7451,6 +7474,7 @@ int stmmac_resume(struct device *dev)
        stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
 
        stmmac_enable_all_queues(priv);
+       stmmac_enable_all_dma_irq(priv);
 
        mutex_unlock(&priv->lock);
        rtnl_unlock();
@@ -7467,7 +7491,7 @@ static int __init stmmac_cmdline_opt(char *str)
        char *opt;
 
        if (!str || !*str)
-               return -EINVAL;
+               return 1;
        while ((opt = strsep(&str, ",")) != NULL) {
                if (!strncmp(opt, "debug:", 6)) {
                        if (kstrtoint(opt + 6, 0, &debug))
@@ -7498,11 +7522,11 @@ static int __init stmmac_cmdline_opt(char *str)
                                goto err;
                }
        }
-       return 0;
+       return 1;
 
 err:
        pr_err("%s: ERROR broken module parameter conversion", __func__);
-       return -EINVAL;
+       return 1;
 }
 
 __setup("stmmaceth=", stmmac_cmdline_opt);
index b900ab5..64c7e26 100644 (file)
@@ -1433,6 +1433,8 @@ static int temac_probe(struct platform_device *pdev)
                lp->indirect_lock = devm_kmalloc(&pdev->dev,
                                                 sizeof(*lp->indirect_lock),
                                                 GFP_KERNEL);
+               if (!lp->indirect_lock)
+                       return -ENOMEM;
                spin_lock_init(lp->indirect_lock);
        }
 
index b1fc153..45c3c4a 100644 (file)
@@ -668,11 +668,11 @@ static void sixpack_close(struct tty_struct *tty)
         */
        netif_stop_queue(sp->dev);
 
+       unregister_netdev(sp->dev);
+
        del_timer_sync(&sp->tx_t);
        del_timer_sync(&sp->resync_t);
 
-       unregister_netdev(sp->dev);
-
        /* Free all 6pack frame buffers after unreg. */
        kfree(sp->rbuff);
        kfree(sp->xbuff);
index f3438d3..2bc730f 100644 (file)
@@ -2975,8 +2975,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
        ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
        ca8210_hw->phy->cca_ed_level = -9800;
        ca8210_hw->phy->symbol_duration = 16;
-       ca8210_hw->phy->lifs_period = 40;
-       ca8210_hw->phy->sifs_period = 12;
+       ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
+       ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
        ca8210_hw->flags =
                IEEE802154_HW_AFILT |
                IEEE802154_HW_OMIT_CKSUM |
index d037682..6782c2c 100644 (file)
@@ -2,7 +2,9 @@ config QCOM_IPA
        tristate "Qualcomm IPA support"
        depends on NET && QCOM_SMEM
        depends on ARCH_QCOM || COMPILE_TEST
+       depends on INTERCONNECT
        depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+       depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
        select QCOM_MDT_LOADER if ARCH_QCOM
        select QCOM_SCM
        select QCOM_QMI_HELPERS
index eaa6fb3..62723a7 100644 (file)
@@ -403,8 +403,16 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty,
                mctp_serial_push(dev, c[i]);
 }
 
+static void mctp_serial_uninit(struct net_device *ndev)
+{
+       struct mctp_serial *dev = netdev_priv(ndev);
+
+       cancel_work_sync(&dev->tx_work);
+}
+
 static const struct net_device_ops mctp_serial_netdev_ops = {
        .ndo_start_xmit = mctp_serial_tx,
+       .ndo_uninit = mctp_serial_uninit,
 };
 
 static void mctp_serial_setup(struct net_device *ndev)
@@ -483,7 +491,6 @@ static void mctp_serial_close(struct tty_struct *tty)
        int idx = dev->idx;
 
        unregister_netdev(dev->netdev);
-       cancel_work_sync(&dev->tx_work);
        ida_free(&mctp_serial_ida, idx);
 }
 
index 966c3b4..e227358 100644 (file)
@@ -148,6 +148,7 @@ static const struct of_device_id aspeed_mdio_of_match[] = {
        { .compatible = "aspeed,ast2600-mdio", },
        { },
 };
+MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match);
 
 static struct platform_driver aspeed_mdio_driver = {
        .driver = {
index 5f4cd24..4eba5a9 100644 (file)
@@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus)
        if (ret)
                return ret;
 
-       return clk_prepare_enable(priv->mdio_clk);
+       ret = clk_prepare_enable(priv->mdio_clk);
+       if (ret == 0)
+               mdelay(10);
+
+       return ret;
 }
 
 static int ipq4019_mdio_probe(struct platform_device *pdev)
index 4300261..378ee77 100644 (file)
@@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data,
                if (err)
                        goto err_fib6_rt_nh_del;
 
-               fib6_event->rt_arr[i]->trap = true;
+               WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
        }
 
        return 0;
 
 err_fib6_rt_nh_del:
        for (i--; i >= 0; i--) {
-               fib6_event->rt_arr[i]->trap = false;
+               WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
                nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
        }
        return err;
index fa71fb7..2429db6 100644 (file)
@@ -553,9 +553,9 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
        else
                mscr = 0;
 
-       return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
-                               MII_88E1121_PHY_MSCR_REG,
-                               MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
+       return phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE,
+                                       MII_88E1121_PHY_MSCR_REG,
+                                       MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
 }
 
 static int m88e1121_config_aneg(struct phy_device *phydev)
@@ -569,11 +569,13 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
                        return err;
        }
 
+       changed = err;
+
        err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
        if (err < 0)
                return err;
 
-       changed = err;
+       changed |= err;
 
        err = genphy_config_aneg(phydev);
        if (err < 0)
@@ -1213,16 +1215,15 @@ static int m88e1118_config_aneg(struct phy_device *phydev)
 {
        int err;
 
-       err = genphy_soft_reset(phydev);
+       err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
        if (err < 0)
                return err;
 
-       err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
+       err = genphy_config_aneg(phydev);
        if (err < 0)
                return err;
 
-       err = genphy_config_aneg(phydev);
-       return 0;
+       return genphy_soft_reset(phydev);
 }
 
 static int m88e1118_config_init(struct phy_device *phydev)
index b7a5ae2..68ee434 100644 (file)
@@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
 
 static int mt7531_phy_config_init(struct phy_device *phydev)
 {
-       if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
-               return -EINVAL;
-
        mtk_gephy_config_init(phydev);
 
        /* PHY link down power saving enable */
index 1a627ba..a310989 100644 (file)
@@ -1468,58 +1468,68 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        u16 hdr_off;
        u32 *pkt_hdr;
 
-       /* This check is no longer done by usbnet */
-       if (skb->len < dev->net->hard_header_len)
+       /* At the end of the SKB, there's a header telling us how many packets
+        * are bundled into this buffer and where we can find an array of
+        * per-packet metadata (which contains elements encoded into u16).
+        */
+       if (skb->len < 4)
                return 0;
-
        skb_trim(skb, skb->len - 4);
        rx_hdr = get_unaligned_le32(skb_tail_pointer(skb));
-
        pkt_cnt = (u16)rx_hdr;
        hdr_off = (u16)(rx_hdr >> 16);
+
+       if (pkt_cnt == 0)
+               return 0;
+
+       /* Make sure that the bounds of the metadata array are inside the SKB
+        * (and in front of the counter at the end).
+        */
+       if (pkt_cnt * 2 + hdr_off > skb->len)
+               return 0;
        pkt_hdr = (u32 *)(skb->data + hdr_off);
 
-       while (pkt_cnt--) {
+       /* Packets must not overlap the metadata array */
+       skb_trim(skb, hdr_off);
+
+       for (; ; pkt_cnt--, pkt_hdr++) {
                u16 pkt_len;
 
                le32_to_cpus(pkt_hdr);
                pkt_len = (*pkt_hdr >> 16) & 0x1fff;
 
-               /* Check CRC or runt packet */
-               if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
-                   (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
-                       skb_pull(skb, (pkt_len + 7) & 0xFFF8);
-                       pkt_hdr++;
-                       continue;
-               }
-
-               if (pkt_cnt == 0) {
-                       skb->len = pkt_len;
-                       /* Skip IP alignment pseudo header */
-                       skb_pull(skb, 2);
-                       skb_set_tail_pointer(skb, skb->len);
-                       skb->truesize = pkt_len + sizeof(struct sk_buff);
-                       ax88179_rx_checksum(skb, pkt_hdr);
-                       return 1;
-               }
+               if (pkt_len > skb->len)
+                       return 0;
 
-               ax_skb = skb_clone(skb, GFP_ATOMIC);
-               if (ax_skb) {
+               /* Check CRC or runt packet */
+               if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
+                   pkt_len >= 2 + ETH_HLEN) {
+                       bool last = (pkt_cnt == 0);
+
+                       if (last) {
+                               ax_skb = skb;
+                       } else {
+                               ax_skb = skb_clone(skb, GFP_ATOMIC);
+                               if (!ax_skb)
+                                       return 0;
+                       }
                        ax_skb->len = pkt_len;
                        /* Skip IP alignment pseudo header */
                        skb_pull(ax_skb, 2);
                        skb_set_tail_pointer(ax_skb, ax_skb->len);
                        ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
                        ax88179_rx_checksum(ax_skb, pkt_hdr);
+
+                       if (last)
+                               return 1;
+
                        usbnet_skb_return(dev, ax_skb);
-               } else {
-                       return 0;
                }
 
-               skb_pull(skb, (pkt_len + 7) & 0xFFF8);
-               pkt_hdr++;
+               /* Trim this packet away from the SKB */
+               if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
+                       return 0;
        }
-       return 1;
 }
 
 static struct sk_buff *
index eb3817d..9b4dfa3 100644 (file)
@@ -583,6 +583,11 @@ static const struct usb_device_id  products[] = {
        .bInterfaceSubClass     = USB_CDC_SUBCLASS_ETHERNET, \
        .bInterfaceProtocol     = USB_CDC_PROTO_NONE
 
+#define ZAURUS_FAKE_INTERFACE \
+       .bInterfaceClass        = USB_CLASS_COMM, \
+       .bInterfaceSubClass     = USB_CDC_SUBCLASS_MDLM, \
+       .bInterfaceProtocol     = USB_CDC_PROTO_NONE
+
 /* SA-1100 based Sharp Zaurus ("collie"), or compatible;
  * wire-incompatible with true CDC Ethernet implementations.
  * (And, it seems, needlessly so...)
@@ -636,6 +641,13 @@ static const struct usb_device_id  products[] = {
        .idProduct              = 0x9032,       /* SL-6000 */
        ZAURUS_MASTER_INTERFACE,
        .driver_info            = 0,
+}, {
+       .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+                | USB_DEVICE_ID_MATCH_DEVICE,
+       .idVendor               = 0x04DD,
+       .idProduct              = 0x9032,       /* SL-6000 */
+       ZAURUS_FAKE_INTERFACE,
+       .driver_info            = 0,
 }, {
        .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
                 | USB_DEVICE_ID_MATCH_DEVICE,
index 82bb5ed..c0b8b4a 100644 (file)
@@ -659,6 +659,11 @@ static const struct usb_device_id mbim_devs[] = {
          .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
        },
 
+       /* Telit FN990 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+         .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+       },
+
        /* default entry */
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&cdc_mbim_info_zlp,
index e303b52..15f91d6 100644 (file)
@@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
 {
        struct sk_buff *skb;
        struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
-       int len;
+       unsigned int len;
        int nframes;
        int x;
-       int offset;
+       unsigned int offset;
        union {
                struct usb_cdc_ncm_ndp16 *ndp16;
                struct usb_cdc_ncm_ndp32 *ndp32;
@@ -1790,8 +1790,8 @@ next_ndp:
                        break;
                }
 
-               /* sanity checking */
-               if (((offset + len) > skb_in->len) ||
+               /* sanity checking - watch out for integer wrap*/
+               if ((offset > skb_in->len) || (len > skb_in->len - offset) ||
                                (len > ctx->rx_max) || (len < ETH_HLEN)) {
                        netif_dbg(dev, rx_err, dev->net,
                                  "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
index 37e5f34..3353e76 100644 (file)
@@ -1400,6 +1400,8 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81d7, 0)},    /* Dell Wireless 5821e */
        {QMI_FIXED_INTF(0x413c, 0x81d7, 1)},    /* Dell Wireless 5821e preproduction config */
        {QMI_FIXED_INTF(0x413c, 0x81e0, 0)},    /* Dell Wireless 5821e with eSIM support*/
+       {QMI_FIXED_INTF(0x413c, 0x81e4, 0)},    /* Dell Wireless 5829e with eSIM support*/
+       {QMI_FIXED_INTF(0x413c, 0x81e6, 0)},    /* Dell Wireless 5829e */
        {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
        {QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
index b658510..5a53e63 100644 (file)
@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                /* ignore the CRC length */
                len = (skb->data[1] | (skb->data[2] << 8)) - 4;
 
-               if (len > ETH_FRAME_LEN)
+               if (len > ETH_FRAME_LEN || len > skb->len)
                        return 0;
 
                /* the last packet of current skb */
index 8e717a0..7984f21 100644 (file)
@@ -256,6 +256,11 @@ static const struct usb_device_id  products [] = {
        .bInterfaceSubClass     = USB_CDC_SUBCLASS_ETHERNET, \
        .bInterfaceProtocol     = USB_CDC_PROTO_NONE
 
+#define ZAURUS_FAKE_INTERFACE \
+       .bInterfaceClass        = USB_CLASS_COMM, \
+       .bInterfaceSubClass     = USB_CDC_SUBCLASS_MDLM, \
+       .bInterfaceProtocol     = USB_CDC_PROTO_NONE
+
 /* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
 {
        .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
@@ -313,6 +318,13 @@ static const struct usb_device_id  products [] = {
        .idProduct              = 0x9032,       /* SL-6000 */
        ZAURUS_MASTER_INTERFACE,
        .driver_info = ZAURUS_PXA_INFO,
+}, {
+       .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+                           | USB_DEVICE_ID_MATCH_DEVICE,
+       .idVendor               = 0x04DD,
+       .idProduct              = 0x9032,       /* SL-6000 */
+       ZAURUS_FAKE_INTERFACE,
+       .driver_info = (unsigned long)&bogus_mdlm_info,
 }, {
        .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
                 | USB_DEVICE_ID_MATCH_DEVICE,
index 354a963..d29fb97 100644 (file)
@@ -265,9 +265,10 @@ static void __veth_xdp_flush(struct veth_rq *rq)
 {
        /* Write ptr_ring before reading rx_notify_masked */
        smp_mb();
-       if (!rq->rx_notify_masked) {
-               rq->rx_notify_masked = true;
-               napi_schedule(&rq->xdp_napi);
+       if (!READ_ONCE(rq->rx_notify_masked) &&
+           napi_schedule_prep(&rq->xdp_napi)) {
+               WRITE_ONCE(rq->rx_notify_masked, true);
+               __napi_schedule(&rq->xdp_napi);
        }
 }
 
@@ -912,8 +913,10 @@ static int veth_poll(struct napi_struct *napi, int budget)
                /* Write rx_notify_masked before reading ptr_ring */
                smp_store_mb(rq->rx_notify_masked, false);
                if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
-                       rq->rx_notify_masked = true;
-                       napi_schedule(&rq->xdp_napi);
+                       if (napi_schedule_prep(&rq->xdp_napi)) {
+                               WRITE_ONCE(rq->rx_notify_masked, true);
+                               __napi_schedule(&rq->xdp_napi);
+                       }
                }
        }
 
index 0eb13e5..d991409 100644 (file)
@@ -693,7 +693,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
 {
        struct brcmf_fw_item *first = &req->items[0];
        struct brcmf_fw *fwctx;
-       char *alt_path;
+       char *alt_path = NULL;
        int ret;
 
        brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
@@ -712,7 +712,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
        fwctx->done = fw_cb;
 
        /* First try alternative board-specific path if any */
-       alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type);
+       if (fwctx->req->board_type)
+               alt_path = brcm_alt_fw_path(first->path,
+                                           fwctx->req->board_type);
        if (alt_path) {
                ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
                                              fwctx->dev, GFP_KERNEL, fwctx,
index 1364b00..208e73a 100644 (file)
@@ -5,3 +5,4 @@ obj-$(CONFIG_IPW2200) += ipw2x00/
 obj-$(CONFIG_IWLEGACY) += iwlegacy/
 
 obj-$(CONFIG_IWLWIFI)  += iwlwifi/
+obj-$(CONFIG_IWLMEI)   += iwlwifi/
index c21c0c6..85e7042 100644 (file)
@@ -80,19 +80,6 @@ config IWLWIFI_OPMODE_MODULAR
 comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
        depends on IWLDVM=n && IWLMVM=n
 
-config IWLWIFI_BCAST_FILTERING
-       bool "Enable broadcast filtering"
-       depends on IWLMVM
-       help
-         Say Y here to enable default bcast filtering configuration.
-
-         Enabling broadcast filtering will drop any incoming wireless
-         broadcast frames, except some very specific predefined
-         patterns (e.g. incoming arp requests).
-
-         If unsure, don't enable this option, as some programs might
-         expect incoming broadcasts for their normal operations.
-
 menu "Debugging Options"
 
 config IWLWIFI_DEBUG
index 790c96d..c17ab53 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
  */
 #include <linux/uuid.h>
 #include "iwl-drv.h"
@@ -888,10 +888,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
         * only one using version 36, so skip this version entirely.
         */
        return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
-              IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
-              (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
-               ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
-                CSR_HW_REV_TYPE_7265D));
+               (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+                fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+               (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+                ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+                 CSR_HW_REV_TYPE_7265D));
 }
 IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
 
index 0703e41..35b8856 100644 (file)
@@ -501,11 +501,6 @@ enum iwl_legacy_cmds {
         */
        DEBUG_LOG_MSG = 0xf7,
 
-       /**
-        * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
-        */
-       BCAST_FILTER_CMD = 0xcf,
-
        /**
         * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
         */
index dd62a63..e44c70b 100644 (file)
@@ -36,92 +36,4 @@ struct iwl_mcast_filter_cmd {
        u8 addr_list[0];
 } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
 
-#define MAX_BCAST_FILTERS 8
-#define MAX_BCAST_FILTER_ATTRS 2
-
-/**
- * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
- * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
- * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
- *     start of ip payload).
- */
-enum iwl_mvm_bcast_filter_attr_offset {
-       BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
-       BCAST_FILTER_OFFSET_IP_END = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
- * @offset_type:       &enum iwl_mvm_bcast_filter_attr_offset.
- * @offset:    starting offset of this pattern.
- * @reserved1: reserved
- * @val:       value to match - big endian (MSB is the first
- *             byte to match from offset pos).
- * @mask:      mask to match (big endian).
- */
-struct iwl_fw_bcast_filter_attr {
-       u8 offset_type;
-       u8 offset;
-       __le16 reserved1;
-       __be32 val;
-       __be32 mask;
-} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
-
-/**
- * enum iwl_mvm_bcast_filter_frame_type - filter frame type
- * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
- * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
- */
-enum iwl_mvm_bcast_filter_frame_type {
-       BCAST_FILTER_FRAME_TYPE_ALL = 0,
-       BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter - broadcast filter
- * @discard: discard frame (1) or let it pass (0).
- * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
- * @reserved1: reserved
- * @num_attrs: number of valid attributes in this filter.
- * @attrs: attributes of this filter. a filter is considered matched
- *     only when all its attributes are matched (i.e. AND relationship)
- */
-struct iwl_fw_bcast_filter {
-       u8 discard;
-       u8 frame_type;
-       u8 num_attrs;
-       u8 reserved1;
-       struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
-} __packed; /* BCAST_FILTER_S_VER_1 */
-
-/**
- * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
- * @default_discard: default action for this mac (discard (1) / pass (0)).
- * @reserved1: reserved
- * @attached_filters: bitmap of relevant filters for this mac.
- */
-struct iwl_fw_bcast_mac {
-       u8 default_discard;
-       u8 reserved1;
-       __le16 attached_filters;
-} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
-
-/**
- * struct iwl_bcast_filter_cmd - broadcast filtering configuration
- * @disable: enable (0) / disable (1)
- * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
- * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
- * @reserved1: reserved
- * @filters: broadcast filters
- * @macs: broadcast filtering configuration per-mac
- */
-struct iwl_bcast_filter_cmd {
-       u8 disable;
-       u8 max_bcast_filters;
-       u8 max_macs;
-       u8 reserved1;
-       struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
-       struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
-} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
-
 #endif /* __iwl_fw_api_filter_h__ */
index 173a699..4a7723e 100644 (file)
@@ -752,7 +752,6 @@ struct iwl_lq_cmd {
 
 u8 iwl_fw_rate_idx_to_plcp(int idx);
 u32 iwl_new_rate_from_v1(u32 rate_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags);
 const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
 const char *iwl_rs_pretty_ant(u8 ant);
 const char *iwl_rs_pretty_bw(int bw);
index e4ebda6..efc6540 100644 (file)
@@ -181,7 +181,6 @@ struct iwl_ucode_capa {
  * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
  * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
  * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
- * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
  * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
  */
 enum iwl_ucode_tlv_flag {
@@ -196,7 +195,6 @@ enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT       = BIT(24),
        IWL_UCODE_TLV_FLAGS_EBS_SUPPORT         = BIT(25),
        IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD        = BIT(26),
-       IWL_UCODE_TLV_FLAGS_BCAST_FILTERING     = BIT(29),
 };
 
 typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
index a21c3be..a835214 100644 (file)
@@ -91,6 +91,20 @@ const char *iwl_rs_pretty_bw(int bw)
 }
 IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
 
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+       int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+       int idx;
+       bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+       int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+       int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+       for (idx = offset; idx < last; idx++)
+               if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+                       return idx - offset;
+       return IWL_RATE_INVALID;
+}
+
 u32 iwl_new_rate_from_v1(u32 rate_v1)
 {
        u32 rate_v2 = 0;
@@ -144,7 +158,10 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
        } else {
                u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
 
-               WARN_ON(legacy_rate < 0);
+               if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+                       legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+                               IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
                rate_v2 |= legacy_rate;
                if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
                        rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
@@ -172,20 +189,6 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
 }
 IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
 
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
-{
-       int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
-       int idx;
-       bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
-       int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
-       int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
-
-       for (idx = offset; idx < last; idx++)
-               if (iwl_fw_rate_idx_to_plcp(idx) == rate)
-                       return idx - offset;
-       return -1;
-}
-
 int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
 {
        char *type;
index f90d466..8e10ba8 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2016 Intel Deutschland GmbH
  */
@@ -329,6 +329,7 @@ enum {
 #define CSR_HW_REV_TYPE_2x00           (0x0000100)
 #define CSR_HW_REV_TYPE_105            (0x0000110)
 #define CSR_HW_REV_TYPE_135            (0x0000120)
+#define CSR_HW_REV_TYPE_3160           (0x0000164)
 #define CSR_HW_REV_TYPE_7265D          (0x0000210)
 #define CSR_HW_REV_TYPE_NONE           (0x00001F0)
 #define CSR_HW_REV_TYPE_QNJ            (0x0000360)
index 83e3b73..6651e78 100644 (file)
@@ -1707,6 +1707,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
  out_unbind:
        complete(&drv->request_firmware_complete);
        device_release_driver(drv->trans->dev);
+       /* drv has just been freed by the release */
+       failure = false;
  free:
        if (failure)
                iwl_dealloc_ucode(drv);
index dd58c8f..04addf9 100644 (file)
@@ -553,8 +553,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                        .has_he = true,
                        .he_cap_elem = {
                                .mac_cap_info[0] =
-                                       IEEE80211_HE_MAC_CAP0_HTC_HE |
-                                       IEEE80211_HE_MAC_CAP0_TWT_REQ,
+                                       IEEE80211_HE_MAC_CAP0_HTC_HE,
                                .mac_cap_info[1] =
                                        IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
                                        IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
index d9733aa..2f7f0f9 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
  */
 
 #include <linux/etherdevice.h>
@@ -146,6 +146,7 @@ struct iwl_mei_filters {
  * @csme_taking_ownership: true when CSME is taking ownership. Used to remember
  *     to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
  *     flow.
+ * @link_prot_state: true when we are in link protection PASSIVE
  * @csa_throttle_end_wk: used when &csa_throttled is true
  * @data_q_lock: protects the access to the data queues which are
  *     accessed without the mutex.
@@ -165,6 +166,7 @@ struct iwl_mei {
        bool amt_enabled;
        bool csa_throttled;
        bool csme_taking_ownership;
+       bool link_prot_state;
        struct delayed_work csa_throttle_end_wk;
        spinlock_t data_q_lock;
 
@@ -229,8 +231,6 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
        if (IS_ERR(mem->ctrl)) {
                int ret = PTR_ERR(mem->ctrl);
 
-               dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
-                       ret);
                mem->ctrl = NULL;
 
                return ret;
@@ -669,6 +669,8 @@ iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
 
        iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
 
+       mei->link_prot_state = status->link_prot_state;
+
        /*
         * Update the Rfkill state in case the host does not own the device:
         * if we are in Link Protection, ask to not touch the device, else,
@@ -1663,9 +1665,11 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
                        mei_cldev_get_drvdata(iwl_mei_global_cldev);
 
                /* we have already a SAP connection */
-               if (iwl_mei_is_connected())
+               if (iwl_mei_is_connected()) {
                        iwl_mei_send_sap_msg(mei->cldev,
                                             SAP_MSG_NOTIF_WIFIDR_UP);
+                       ops->rfkill(priv, mei->link_prot_state);
+               }
        }
        ret = 0;
 
@@ -1784,6 +1788,8 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
 
 #endif /* CONFIG_DEBUG_FS */
 
+#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3
+
 /*
  * iwl_mei_probe - the probe function called by the mei bus enumeration
  *
@@ -1795,6 +1801,7 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
 static int iwl_mei_probe(struct mei_cl_device *cldev,
                         const struct mei_cl_device_id *id)
 {
+       int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
        struct iwl_mei *mei;
        int ret;
 
@@ -1812,15 +1819,31 @@ static int iwl_mei_probe(struct mei_cl_device *cldev,
        mei_cldev_set_drvdata(cldev, mei);
        mei->cldev = cldev;
 
-       /*
-        * The CSME firmware needs to boot the internal WLAN client. Wait here
-        * so that the DMA map request will succeed.
-        */
-       msleep(20);
+       do {
+               ret = iwl_mei_alloc_shared_mem(cldev);
+               if (!ret)
+                       break;
+               /*
+                * The CSME firmware needs to boot the internal WLAN client.
+                * This can take time in certain configurations (usually
+                * upon resume and when the whole CSME firmware is shut down
+                * during suspend).
+                *
+                * Wait a bit before retrying and hope we'll succeed next time.
+                */
 
-       ret = iwl_mei_alloc_shared_mem(cldev);
-       if (ret)
+               dev_dbg(&cldev->dev,
+                       "Couldn't allocate the shared memory: %d, attempt %d / %d\n",
+                       ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
+               msleep(100);
+               alloc_retry--;
+       } while (alloc_retry);
+
+       if (ret) {
+               dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
+                       ret);
                goto free;
+       }
 
        iwl_mei_init_shared_mem(mei);
 
index 5f966af..468102a 100644 (file)
@@ -195,8 +195,7 @@ static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
        bool match;
 
        if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
-           !pskb_may_pull(skb, skb_network_offset(skb) +
-                          sizeof(ip_hdrlen(skb) - sizeof(*iphdr))))
+           !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
                return false;
 
        iphdrlen = ip_hdrlen(skb);
index fb4920b..445c94a 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
 #include <linux/vmalloc.h>
+#include <linux/err.h>
 #include <linux/ieee80211.h>
 #include <linux/netdevice.h>
 
@@ -1369,189 +1370,6 @@ static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
        return count;
 }
 
-#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
-                                           char __user *user_buf,
-                                           size_t count, loff_t *ppos)
-{
-       struct iwl_mvm *mvm = file->private_data;
-       struct iwl_bcast_filter_cmd cmd;
-       const struct iwl_fw_bcast_filter *filter;
-       char *buf;
-       int bufsz = 1024;
-       int i, j, pos = 0;
-       ssize_t ret;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       mutex_lock(&mvm->mutex);
-       if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
-               ADD_TEXT("None\n");
-               mutex_unlock(&mvm->mutex);
-               goto out;
-       }
-       mutex_unlock(&mvm->mutex);
-
-       for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
-               filter = &cmd.filters[i];
-
-               ADD_TEXT("Filter [%d]:\n", i);
-               ADD_TEXT("\tDiscard=%d\n", filter->discard);
-               ADD_TEXT("\tFrame Type: %s\n",
-                        filter->frame_type ? "IPv4" : "Generic");
-
-               for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
-                       const struct iwl_fw_bcast_filter_attr *attr;
-
-                       attr = &filter->attrs[j];
-                       if (!attr->mask)
-                               break;
-
-                       ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
-                                j, attr->offset,
-                                attr->offset_type ? "IP End" :
-                                                    "Payload Start",
-                                be32_to_cpu(attr->mask),
-                                be32_to_cpu(attr->val),
-                                le16_to_cpu(attr->reserved1));
-               }
-       }
-out:
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
-                                            size_t count, loff_t *ppos)
-{
-       int pos, next_pos;
-       struct iwl_fw_bcast_filter filter = {};
-       struct iwl_bcast_filter_cmd cmd;
-       u32 filter_id, attr_id, mask, value;
-       int err = 0;
-
-       if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
-                  &filter.frame_type, &pos) != 3)
-               return -EINVAL;
-
-       if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
-           filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
-               return -EINVAL;
-
-       for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
-            attr_id++) {
-               struct iwl_fw_bcast_filter_attr *attr =
-                               &filter.attrs[attr_id];
-
-               if (pos >= count)
-                       break;
-
-               if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
-                          &attr->offset, &attr->offset_type,
-                          &mask, &value, &next_pos) != 4)
-                       return -EINVAL;
-
-               attr->mask = cpu_to_be32(mask);
-               attr->val = cpu_to_be32(value);
-               if (mask)
-                       filter.num_attrs++;
-
-               pos += next_pos;
-       }
-
-       mutex_lock(&mvm->mutex);
-       memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
-              &filter, sizeof(filter));
-
-       /* send updated bcast filtering configuration */
-       if (iwl_mvm_firmware_running(mvm) &&
-           mvm->dbgfs_bcast_filtering.override &&
-           iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
-               err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
-                                          sizeof(cmd), &cmd);
-       mutex_unlock(&mvm->mutex);
-
-       return err ?: count;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
-                                                char __user *user_buf,
-                                                size_t count, loff_t *ppos)
-{
-       struct iwl_mvm *mvm = file->private_data;
-       struct iwl_bcast_filter_cmd cmd;
-       char *buf;
-       int bufsz = 1024;
-       int i, pos = 0;
-       ssize_t ret;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       mutex_lock(&mvm->mutex);
-       if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
-               ADD_TEXT("None\n");
-               mutex_unlock(&mvm->mutex);
-               goto out;
-       }
-       mutex_unlock(&mvm->mutex);
-
-       for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
-               const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
-
-               ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
-                        i, mac->default_discard, mac->attached_filters);
-       }
-out:
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
-                                                 char *buf, size_t count,
-                                                 loff_t *ppos)
-{
-       struct iwl_bcast_filter_cmd cmd;
-       struct iwl_fw_bcast_mac mac = {};
-       u32 mac_id, attached_filters;
-       int err = 0;
-
-       if (!mvm->bcast_filters)
-               return -ENOENT;
-
-       if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
-                  &attached_filters) != 3)
-               return -EINVAL;
-
-       if (mac_id >= ARRAY_SIZE(cmd.macs) ||
-           mac.default_discard > 1 ||
-           attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
-               return -EINVAL;
-
-       mac.attached_filters = cpu_to_le16(attached_filters);
-
-       mutex_lock(&mvm->mutex);
-       memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
-              &mac, sizeof(mac));
-
-       /* send updated bcast filtering configuration */
-       if (iwl_mvm_firmware_running(mvm) &&
-           mvm->dbgfs_bcast_filtering.override &&
-           iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
-               err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
-                                          sizeof(cmd), &cmd);
-       mutex_unlock(&mvm->mutex);
-
-       return err ?: count;
-}
-#endif
-
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
        _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1881,11 +1699,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
 
 MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
 
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
-#endif
-
 #ifdef CONFIG_ACPI
 MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
 #endif
@@ -2045,7 +1858,6 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
 void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
 {
        struct dentry *bcast_dir __maybe_unused;
-       char buf[100];
 
        spin_lock_init(&mvm->drv_stats_lock);
 
@@ -2097,21 +1909,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
 
        MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
 
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
-               bcast_dir = debugfs_create_dir("bcast_filtering",
-                                              mvm->debugfs_dir);
-
-               debugfs_create_bool("override", 0600, bcast_dir,
-                                   &mvm->dbgfs_bcast_filtering.override);
-
-               MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
-                                          bcast_dir, 0600);
-               MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
-                                          bcast_dir, 0600);
-       }
-#endif
-
 #ifdef CONFIG_PM_SLEEP
        MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
        debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
@@ -2142,6 +1939,11 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
         * Create a symlink with mac80211. It will be removed when mac80211
         * exits (before the opmode exits which removes the target.)
         */
-       snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
-       debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf);
+       if (!IS_ERR(mvm->debugfs_dir)) {
+               char buf[100];
+
+               snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
+               debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir,
+                                      buf);
+       }
 }
index 6f4690e..ae589b3 100644 (file)
@@ -1741,7 +1741,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        ret = iwl_mvm_sar_init(mvm);
        if (ret == 0)
                ret = iwl_mvm_sar_geo_init(mvm);
-       else if (ret < 0)
+       if (ret < 0)
                goto error;
 
        ret = iwl_mvm_sgom_init(mvm);
index 65f4fe3..709a3df 100644 (file)
@@ -55,79 +55,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
        },
 };
 
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-/*
- * Use the reserved field to indicate magic values.
- * these values will only be used internally by the driver,
- * and won't make it to the fw (reserved will be 0).
- * BC_FILTER_MAGIC_IP - configure the val of this attribute to
- *     be the vif's ip address. in case there is not a single
- *     ip address (0, or more than 1), this attribute will
- *     be skipped.
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to
- *     the LSB bytes of the vif's mac address
- */
-enum {
-       BC_FILTER_MAGIC_NONE = 0,
-       BC_FILTER_MAGIC_IP,
-       BC_FILTER_MAGIC_MAC,
-};
-
-static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
-       {
-               /* arp */
-               .discard = 0,
-               .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
-               .attrs = {
-                       {
-                               /* frame type - arp, hw type - ethernet */
-                               .offset_type =
-                                       BCAST_FILTER_OFFSET_PAYLOAD_START,
-                               .offset = sizeof(rfc1042_header),
-                               .val = cpu_to_be32(0x08060001),
-                               .mask = cpu_to_be32(0xffffffff),
-                       },
-                       {
-                               /* arp dest ip */
-                               .offset_type =
-                                       BCAST_FILTER_OFFSET_PAYLOAD_START,
-                               .offset = sizeof(rfc1042_header) + 2 +
-                                         sizeof(struct arphdr) +
-                                         ETH_ALEN + sizeof(__be32) +
-                                         ETH_ALEN,
-                               .mask = cpu_to_be32(0xffffffff),
-                               /* mark it as special field */
-                               .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
-                       },
-               },
-       },
-       {
-               /* dhcp offer bcast */
-               .discard = 0,
-               .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
-               .attrs = {
-                       {
-                               /* udp dest port - 68 (bootp client)*/
-                               .offset_type = BCAST_FILTER_OFFSET_IP_END,
-                               .offset = offsetof(struct udphdr, dest),
-                               .val = cpu_to_be32(0x00440000),
-                               .mask = cpu_to_be32(0xffff0000),
-                       },
-                       {
-                               /* dhcp - lsb bytes of client hw address */
-                               .offset_type = BCAST_FILTER_OFFSET_IP_END,
-                               .offset = 38,
-                               .mask = cpu_to_be32(0xffffffff),
-                               /* mark it as special field */
-                               .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
-                       },
-               },
-       },
-       /* last filter must be empty */
-       {},
-};
-#endif
-
 static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
        .max_peers = IWL_MVM_TOF_MAX_APS,
        .report_ap_tsf = 1,
@@ -299,7 +226,6 @@ static const u8 he_if_types_ext_capa_sta[] = {
         [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
         [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
         [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
-        [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
 };
 
 static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
@@ -693,11 +619,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        }
 #endif
 
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-       /* assign default bcast filtering configuration */
-       mvm->bcast_filters = iwl_mvm_default_bcast_filters;
-#endif
-
        ret = iwl_mvm_leds_init(mvm);
        if (ret)
                return ret;
@@ -1853,162 +1774,6 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
        mutex_unlock(&mvm->mutex);
 }
 
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-struct iwl_bcast_iter_data {
-       struct iwl_mvm *mvm;
-       struct iwl_bcast_filter_cmd *cmd;
-       u8 current_filter;
-};
-
-static void
-iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
-                        const struct iwl_fw_bcast_filter *in_filter,
-                        struct iwl_fw_bcast_filter *out_filter)
-{
-       struct iwl_fw_bcast_filter_attr *attr;
-       int i;
-
-       memcpy(out_filter, in_filter, sizeof(*out_filter));
-
-       for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
-               attr = &out_filter->attrs[i];
-
-               if (!attr->mask)
-                       break;
-
-               switch (attr->reserved1) {
-               case cpu_to_le16(BC_FILTER_MAGIC_IP):
-                       if (vif->bss_conf.arp_addr_cnt != 1) {
-                               attr->mask = 0;
-                               continue;
-                       }
-
-                       attr->val = vif->bss_conf.arp_addr_list[0];
-                       break;
-               case cpu_to_le16(BC_FILTER_MAGIC_MAC):
-                       attr->val = *(__be32 *)&vif->addr[2];
-                       break;
-               default:
-                       break;
-               }
-               attr->reserved1 = 0;
-               out_filter->num_attrs++;
-       }
-}
-
-static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
-                                         struct ieee80211_vif *vif)
-{
-       struct iwl_bcast_iter_data *data = _data;
-       struct iwl_mvm *mvm = data->mvm;
-       struct iwl_bcast_filter_cmd *cmd = data->cmd;
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_fw_bcast_mac *bcast_mac;
-       int i;
-
-       if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
-               return;
-
-       bcast_mac = &cmd->macs[mvmvif->id];
-
-       /*
-        * enable filtering only for associated stations, but not for P2P
-        * Clients
-        */
-       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
-           !vif->bss_conf.assoc)
-               return;
-
-       bcast_mac->default_discard = 1;
-
-       /* copy all configured filters */
-       for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
-               /*
-                * Make sure we don't exceed our filters limit.
-                * if there is still a valid filter to be configured,
-                * be on the safe side and just allow bcast for this mac.
-                */
-               if (WARN_ON_ONCE(data->current_filter >=
-                                ARRAY_SIZE(cmd->filters))) {
-                       bcast_mac->default_discard = 0;
-                       bcast_mac->attached_filters = 0;
-                       break;
-               }
-
-               iwl_mvm_set_bcast_filter(vif,
-                                        &mvm->bcast_filters[i],
-                                        &cmd->filters[data->current_filter]);
-
-               /* skip current filter if it contains no attributes */
-               if (!cmd->filters[data->current_filter].num_attrs)
-                       continue;
-
-               /* attach the filter to current mac */
-               bcast_mac->attached_filters |=
-                               cpu_to_le16(BIT(data->current_filter));
-
-               data->current_filter++;
-       }
-}
-
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
-                                   struct iwl_bcast_filter_cmd *cmd)
-{
-       struct iwl_bcast_iter_data iter_data = {
-               .mvm = mvm,
-               .cmd = cmd,
-       };
-
-       if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
-               return false;
-
-       memset(cmd, 0, sizeof(*cmd));
-       cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
-       cmd->max_macs = ARRAY_SIZE(cmd->macs);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       /* use debugfs filters/macs if override is configured */
-       if (mvm->dbgfs_bcast_filtering.override) {
-               memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
-                      sizeof(cmd->filters));
-               memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
-                      sizeof(cmd->macs));
-               return true;
-       }
-#endif
-
-       /* if no filters are configured, do nothing */
-       if (!mvm->bcast_filters)
-               return false;
-
-       /* configure and attach these filters for each associated sta vif */
-       ieee80211_iterate_active_interfaces(
-               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-               iwl_mvm_bcast_filter_iterator, &iter_data);
-
-       return true;
-}
-
-static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
-       struct iwl_bcast_filter_cmd cmd;
-
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
-               return 0;
-
-       if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
-               return 0;
-
-       return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
-                                   sizeof(cmd), &cmd);
-}
-#else
-static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
-       return 0;
-}
-#endif
-
 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif)
 {
@@ -2520,7 +2285,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                }
 
                iwl_mvm_recalc_multicast(mvm);
-               iwl_mvm_configure_bcast_filter(mvm);
 
                /* reset rssi values */
                mvmvif->bf_data.ave_beacon_signal = 0;
@@ -2570,11 +2334,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                }
        }
 
-       if (changes & BSS_CHANGED_ARP_FILTER) {
-               IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
-               iwl_mvm_configure_bcast_filter(mvm);
-       }
-
        if (changes & BSS_CHANGED_BANDWIDTH)
                iwl_mvm_apply_fw_smps_request(vif);
 }
index 1dcbb0e..d78f407 100644 (file)
@@ -884,17 +884,6 @@ struct iwl_mvm {
        /* rx chain antennas set through debugfs for the scan command */
        u8 scan_rx_ant;
 
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-       /* broadcast filters to configure for each associated station */
-       const struct iwl_fw_bcast_filter *bcast_filters;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       struct {
-               bool override;
-               struct iwl_bcast_filter_cmd cmd;
-       } dbgfs_bcast_filtering;
-#endif
-#endif
-
        /* Internal station */
        struct iwl_mvm_int_sta aux_sta;
        struct iwl_mvm_int_sta snif_sta;
@@ -1593,8 +1582,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
 
 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
-                                   struct iwl_bcast_filter_cmd *cmd);
 
 /*
  * FW notifications / CMD responses handlers
@@ -2225,7 +2212,7 @@ static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
 static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
 {
        bool sw_rfkill =
-               mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false;
+               mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false;
 
        if (mvm->mei_registered)
                iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
index 87630d3..1f8b979 100644 (file)
@@ -469,7 +469,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
        HCMD_NAME(MCC_CHUB_UPDATE_CMD),
        HCMD_NAME(MARKER_CMD),
        HCMD_NAME(BT_PROFILE_NOTIFICATION),
-       HCMD_NAME(BCAST_FILTER_CMD),
        HCMD_NAME(MCAST_FILTER_CMD),
        HCMD_NAME(REPLY_SF_CFG_CMD),
        HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
index 6fa2c12..9213f85 100644 (file)
@@ -1427,7 +1427,7 @@ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
        struct ieee80211_tx_rate *r = &info->status.rates[0];
 
        if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
-                                   TX_CMD, 0) > 6)
+                                   TX_CMD, 0) <= 6)
                rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
 
        info->status.antenna =
index 7845036..080a158 100644 (file)
@@ -71,12 +71,13 @@ static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy,
 {
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
 
        mutex_lock(&mvm->mutex);
-       iwl_mvm_mei_get_ownership(mvm);
+       ret = iwl_mvm_mei_get_ownership(mvm);
        mutex_unlock(&mvm->mutex);
 
-       return 0;
+       return ret;
 }
 
 static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
index 0febdca..94f40c4 100644 (file)
@@ -385,8 +385,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
-               ret = -EIO;
-               goto out;
+               return -EIO;
        }
 
        iwl_enable_rfkill_int(trans);
index a63386a..ef14584 100644 (file)
@@ -1329,8 +1329,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
-               ret = -EIO;
-               goto out;
+               return -EIO;
        }
 
        iwl_enable_rfkill_int(trans);
index 8d54f9f..fc5725f 100644 (file)
@@ -2336,6 +2336,15 @@ static void hw_scan_work(struct work_struct *work)
                        if (req->ie_len)
                                skb_put_data(probe, req->ie, req->ie_len);
 
+                       if (!ieee80211_tx_prepare_skb(hwsim->hw,
+                                                     hwsim->hw_scan_vif,
+                                                     probe,
+                                                     hwsim->tmp_chan->band,
+                                                     NULL)) {
+                               kfree_skb(probe);
+                               continue;
+                       }
+
                        local_bh_disable();
                        mac80211_hwsim_tx_frame(hwsim->hw, probe,
                                                hwsim->tmp_chan);
@@ -3770,6 +3779,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
                }
                txi->flags |= IEEE80211_TX_STAT_ACK;
        }
+
+       if (hwsim_flags & HWSIM_TX_CTL_NO_ACK)
+               txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
        ieee80211_tx_status_irqsafe(data2->hw, skb);
        return 0;
 out:
index d24b7a7..990360d 100644 (file)
@@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be)
                unsigned int queue_index;
 
                xen_unregister_watchers(vif);
+               xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
 #ifdef CONFIG_DEBUG_FS
                xenvif_debugfs_delif(vif);
 #endif /* CONFIG_DEBUG_FS */
@@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
 
                /* Not interested in this watch anymore. */
                unregister_hotplug_status_watch(be);
-               xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
        }
        kfree(str);
 }
@@ -824,15 +824,11 @@ static void connect(struct backend_info *be)
        xenvif_carrier_on(be->vif);
 
        unregister_hotplug_status_watch(be);
-       if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
-               err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
-                                          NULL, hotplug_status_changed,
-                                          "%s/%s", dev->nodename,
-                                          "hotplug-status");
-               if (err)
-                       goto err;
+       err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
+                                  hotplug_status_changed,
+                                  "%s/%s", dev->nodename, "hotplug-status");
+       if (!err)
                be->have_hotplug_status_watch = 1;
-       }
 
        netif_tx_wake_all_queues(be->vif->dev);
 
index 8b18246..7748f07 100644 (file)
@@ -842,6 +842,28 @@ static int xennet_close(struct net_device *dev)
        return 0;
 }
 
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+       unsigned int i;
+
+       for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+               struct netfront_queue *queue = &info->queues[i];
+
+               if (netif_running(info->netdev))
+                       napi_disable(&queue->napi);
+               netif_napi_del(&queue->napi);
+       }
+
+       kfree(info->queues);
+       info->queues = NULL;
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+       struct netfront_info *np = netdev_priv(dev);
+       xennet_destroy_queues(np);
+}
+
 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
 {
        unsigned long flags;
@@ -1611,6 +1633,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 }
 
 static const struct net_device_ops xennet_netdev_ops = {
+       .ndo_uninit          = xennet_uninit,
        .ndo_open            = xennet_open,
        .ndo_stop            = xennet_close,
        .ndo_start_xmit      = xennet_start_xmit,
@@ -2103,22 +2126,6 @@ error:
        return err;
 }
 
-static void xennet_destroy_queues(struct netfront_info *info)
-{
-       unsigned int i;
-
-       for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
-               struct netfront_queue *queue = &info->queues[i];
-
-               if (netif_running(info->netdev))
-                       napi_disable(&queue->napi);
-               netif_napi_del(&queue->napi);
-       }
-
-       kfree(info->queues);
-       info->queues = NULL;
-}
-
 
 
 static int xennet_create_page_pool(struct netfront_queue *queue)
index fede051..4081fc5 100644 (file)
@@ -168,6 +168,18 @@ static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
        return NTB_TOPO_NONE;
 }
 
+static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
+{
+       switch (ppd & SPR_PPD_TOPO_MASK) {
+       case SPR_PPD_TOPO_B2B_USD:
+               return NTB_TOPO_B2B_USD;
+       case SPR_PPD_TOPO_B2B_DSD:
+               return NTB_TOPO_B2B_DSD;
+       }
+
+       return NTB_TOPO_NONE;
+}
+
 int gen4_init_dev(struct intel_ntb_dev *ndev)
 {
        struct pci_dev *pdev = ndev->ntb.pdev;
@@ -183,7 +195,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev)
        }
 
        ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
-       ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
+       if (pdev_is_ICX(pdev))
+               ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
+       else if (pdev_is_SPR(pdev))
+               ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
        dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
                ntb_topo_string(ndev->ntb.topo));
        if (ndev->ntb.topo == NTB_TOPO_NONE)
index 3fcd3fd..f91323e 100644 (file)
 #define GEN4_PPD_CLEAR_TRN             0x0001
 #define GEN4_PPD_LINKTRN               0x0008
 #define GEN4_PPD_CONN_MASK             0x0300
+#define SPR_PPD_CONN_MASK              0x0700
 #define GEN4_PPD_CONN_B2B              0x0200
 #define GEN4_PPD_DEV_MASK              0x1000
 #define GEN4_PPD_DEV_DSD               0x1000
 #define GEN4_PPD_DEV_USD               0x0000
+#define SPR_PPD_DEV_MASK               0x4000
+#define SPR_PPD_DEV_DSD                0x4000
+#define SPR_PPD_DEV_USD                0x0000
 #define GEN4_LINK_CTRL_LINK_DISABLE    0x0010
 
 #define GEN4_SLOTSTS                   0xb05a
 #define GEN4_PPD_TOPO_B2B_USD  (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD)
 #define GEN4_PPD_TOPO_B2B_DSD  (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD)
 
+#define SPR_PPD_TOPO_MASK      (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK)
+#define SPR_PPD_TOPO_B2B_USD   (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD)
+#define SPR_PPD_TOPO_B2B_DSD   (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD)
+
 #define GEN4_DB_COUNT                  32
 #define GEN4_DB_LINK                   32
 #define GEN4_DB_LINK_BIT               BIT_ULL(GEN4_DB_LINK)
@@ -112,4 +120,12 @@ static inline int pdev_is_ICX(struct pci_dev *pdev)
        return 0;
 }
 
+static inline int pdev_is_SPR(struct pci_dev *pdev)
+{
+       if (pdev_is_gen4(pdev) &&
+           pdev->revision > PCI_DEVICE_REVISION_ICX_MAX)
+               return 1;
+       return 0;
+}
+
 #endif
index dd683cb..6295e55 100644 (file)
@@ -33,7 +33,6 @@ int ntb_msi_init(struct ntb_dev *ntb,
 {
        phys_addr_t mw_phys_addr;
        resource_size_t mw_size;
-       size_t struct_size;
        int peer_widx;
        int peers;
        int ret;
@@ -43,9 +42,8 @@ int ntb_msi_init(struct ntb_dev *ntb,
        if (peers <= 0)
                return -EINVAL;
 
-       struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers;
-
-       ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
+       ntb->msi = devm_kzalloc(&ntb->dev, struct_size(ntb->msi, peer_mws, peers),
+                               GFP_KERNEL);
        if (!ntb->msi)
                return -ENOMEM;
 
index 961a5f8..fd4720d 100644 (file)
@@ -368,6 +368,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
 
 void nvme_complete_batch_req(struct request *req)
 {
+       trace_nvme_complete_rq(req);
        nvme_cleanup_cmd(req);
        nvme_end_req_zoned(req);
 }
@@ -1722,7 +1723,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        return 0;
 }
 
-static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
        struct nvme_ctrl *ctrl = ns->ctrl;
 
@@ -1738,7 +1739,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
 
        ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
        if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
-               return 0;
+               return;
+
        if (ctrl->ops->flags & NVME_F_FABRICS) {
                /*
                 * The NVMe over Fabrics specification only supports metadata as
@@ -1746,7 +1748,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
                 * remap the separate metadata buffer from the block layer.
                 */
                if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
-                       return -EINVAL;
+                       return;
 
                ns->features |= NVME_NS_EXT_LBAS;
 
@@ -1773,8 +1775,6 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
                else
                        ns->features |= NVME_NS_METADATA_SUPPORTED;
        }
-
-       return 0;
 }
 
 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
@@ -1915,9 +1915,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
        ns->lba_shift = id->lbaf[lbaf].ds;
        nvme_set_queue_limits(ns->ctrl, ns->queue);
 
-       ret = nvme_configure_metadata(ns, id);
-       if (ret)
-               goto out_unfreeze;
+       nvme_configure_metadata(ns, id);
        nvme_set_chunk_sectors(ns, id);
        nvme_update_disk_info(ns->disk, ns, id);
 
@@ -1933,7 +1931,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
        if (blk_queue_is_zoned(ns->queue)) {
                ret = nvme_revalidate_zones(ns);
                if (ret && !nvme_first_scan(ns->disk))
-                       goto out;
+                       return ret;
        }
 
        if (nvme_ns_head_multipath(ns->head)) {
@@ -1948,16 +1946,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
        return 0;
 
 out_unfreeze:
-       blk_mq_unfreeze_queue(ns->disk->queue);
-out:
        /*
         * If probing fails due an unsupported feature, hide the block device,
         * but still allow other access.
         */
        if (ret == -ENODEV) {
                ns->disk->flags |= GENHD_FL_HIDDEN;
+               set_bit(NVME_NS_READY, &ns->flags);
                ret = 0;
        }
+       blk_mq_unfreeze_queue(ns->disk->queue);
        return ret;
 }
 
@@ -4573,7 +4571,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
        if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
                return;
 
-       blk_set_queue_dying(ns->queue);
+       blk_mark_disk_dead(ns->disk);
        nvme_start_ns_queue(ns);
 
        set_capacity_and_notify(ns->disk, 0);
index f8bf660..ff77523 100644 (file)
@@ -848,7 +848,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
        if (!head->disk)
                return;
-       blk_set_queue_dying(head->disk->queue);
+       blk_mark_disk_dead(head->disk);
        /* make sure all pending bios are cleaned up */
        kblockd_schedule_work(&head->requeue_work);
        flush_work(&head->requeue_work);
index 01e24b5..65e00c6 100644 (file)
@@ -44,6 +44,8 @@ struct nvme_tcp_request {
        u32                     data_len;
        u32                     pdu_len;
        u32                     pdu_sent;
+       u32                     h2cdata_left;
+       u32                     h2cdata_offset;
        u16                     ttag;
        __le16                  status;
        struct list_head        entry;
@@ -95,6 +97,7 @@ struct nvme_tcp_queue {
        struct nvme_tcp_request *request;
 
        int                     queue_size;
+       u32                     maxh2cdata;
        size_t                  cmnd_capsule_len;
        struct nvme_tcp_ctrl    *ctrl;
        unsigned long           flags;
@@ -572,23 +575,26 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
        return ret;
 }
 
-static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
-               struct nvme_tcp_r2t_pdu *pdu)
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
 {
        struct nvme_tcp_data_pdu *data = req->pdu;
        struct nvme_tcp_queue *queue = req->queue;
        struct request *rq = blk_mq_rq_from_pdu(req);
+       u32 h2cdata_sent = req->pdu_len;
        u8 hdgst = nvme_tcp_hdgst_len(queue);
        u8 ddgst = nvme_tcp_ddgst_len(queue);
 
        req->state = NVME_TCP_SEND_H2C_PDU;
        req->offset = 0;
-       req->pdu_len = le32_to_cpu(pdu->r2t_length);
+       req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
        req->pdu_sent = 0;
+       req->h2cdata_left -= req->pdu_len;
+       req->h2cdata_offset += h2cdata_sent;
 
        memset(data, 0, sizeof(*data));
        data->hdr.type = nvme_tcp_h2c_data;
-       data->hdr.flags = NVME_TCP_F_DATA_LAST;
+       if (!req->h2cdata_left)
+               data->hdr.flags = NVME_TCP_F_DATA_LAST;
        if (queue->hdr_digest)
                data->hdr.flags |= NVME_TCP_F_HDGST;
        if (queue->data_digest)
@@ -597,9 +603,9 @@ static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
        data->hdr.pdo = data->hdr.hlen + hdgst;
        data->hdr.plen =
                cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
-       data->ttag = pdu->ttag;
+       data->ttag = req->ttag;
        data->command_id = nvme_cid(rq);
-       data->data_offset = pdu->r2t_offset;
+       data->data_offset = cpu_to_le32(req->h2cdata_offset);
        data->data_length = cpu_to_le32(req->pdu_len);
 }
 
@@ -609,6 +615,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
        struct nvme_tcp_request *req;
        struct request *rq;
        u32 r2t_length = le32_to_cpu(pdu->r2t_length);
+       u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
 
        rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
        if (!rq) {
@@ -633,14 +640,19 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
                return -EPROTO;
        }
 
-       if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
+       if (unlikely(r2t_offset < req->data_sent)) {
                dev_err(queue->ctrl->ctrl.device,
                        "req %d unexpected r2t offset %u (expected %zu)\n",
-                       rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
+                       rq->tag, r2t_offset, req->data_sent);
                return -EPROTO;
        }
 
-       nvme_tcp_setup_h2c_data_pdu(req, pdu);
+       req->pdu_len = 0;
+       req->h2cdata_left = r2t_length;
+       req->h2cdata_offset = r2t_offset;
+       req->ttag = pdu->ttag;
+
+       nvme_tcp_setup_h2c_data_pdu(req);
        nvme_tcp_queue_request(req, false, true);
 
        return 0;
@@ -913,13 +925,22 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 
 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 {
-       nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
+       if (nvme_tcp_async_req(req)) {
+               union nvme_result res = {};
+
+               nvme_complete_async_event(&req->queue->ctrl->ctrl,
+                               cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+       } else {
+               nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+                               NVME_SC_HOST_PATH_ERROR);
+       }
 }
 
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 {
        struct nvme_tcp_queue *queue = req->queue;
        int req_data_len = req->data_len;
+       u32 h2cdata_left = req->h2cdata_left;
 
        while (true) {
                struct page *page = nvme_tcp_req_cur_page(req);
@@ -964,7 +985,10 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
                                req->state = NVME_TCP_SEND_DDGST;
                                req->offset = 0;
                        } else {
-                               nvme_tcp_done_send_req(queue);
+                               if (h2cdata_left)
+                                       nvme_tcp_setup_h2c_data_pdu(req);
+                               else
+                                       nvme_tcp_done_send_req(queue);
                        }
                        return 1;
                }
@@ -1022,9 +1046,14 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
        if (queue->hdr_digest && !req->offset)
                nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
 
-       ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
-                       offset_in_page(pdu) + req->offset, len,
-                       MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+       if (!req->h2cdata_left)
+               ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
+                               offset_in_page(pdu) + req->offset, len,
+                               MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+       else
+               ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
+                               offset_in_page(pdu) + req->offset, len,
+                               MSG_DONTWAIT | MSG_MORE);
        if (unlikely(ret <= 0))
                return ret;
 
@@ -1044,6 +1073,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 {
        struct nvme_tcp_queue *queue = req->queue;
        size_t offset = req->offset;
+       u32 h2cdata_left = req->h2cdata_left;
        int ret;
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
        struct kvec iov = {
@@ -1061,7 +1091,10 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
                return ret;
 
        if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
-               nvme_tcp_done_send_req(queue);
+               if (h2cdata_left)
+                       nvme_tcp_setup_h2c_data_pdu(req);
+               else
+                       nvme_tcp_done_send_req(queue);
                return 1;
        }
 
@@ -1253,6 +1286,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
        struct msghdr msg = {};
        struct kvec iov;
        bool ctrl_hdgst, ctrl_ddgst;
+       u32 maxh2cdata;
        int ret;
 
        icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
@@ -1336,6 +1370,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
                goto free_icresp;
        }
 
+       maxh2cdata = le32_to_cpu(icresp->maxdata);
+       if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
+               pr_err("queue %d: invalid maxh2cdata returned %u\n",
+                      nvme_tcp_queue_id(queue), maxh2cdata);
+               goto free_icresp;
+       }
+       queue->maxh2cdata = maxh2cdata;
+
        ret = 0;
 free_icresp:
        kfree(icresp);
@@ -2321,6 +2363,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
        req->data_sent = 0;
        req->pdu_len = 0;
        req->pdu_sent = 0;
+       req->h2cdata_left = 0;
        req->data_len = blk_rq_nr_phys_segments(rq) ?
                                blk_rq_payload_bytes(rq) : 0;
        req->curr_bio = rq->bio;
index 23a38dc..9fd1602 100644 (file)
@@ -771,7 +771,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 
        if (config->wp_gpio)
                nvmem->wp_gpio = config->wp_gpio;
-       else
+       else if (!config->ignore_wp)
                nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
                                                    GPIOD_OUT_HIGH);
        if (IS_ERR(nvmem->wp_gpio)) {
index ad85ff6..ec315b0 100644 (file)
@@ -648,8 +648,8 @@ void __init early_init_fdt_scan_reserved_mem(void)
        }
 
        fdt_scan_reserved_mem();
-       fdt_init_reserved_mem();
        fdt_reserve_elfcorehdr();
+       fdt_init_reserved_mem();
 }
 
 /**
index 7099210..2c2fb16 100644 (file)
@@ -513,24 +513,24 @@ static void __init of_unittest_parse_phandle_with_args(void)
        memset(&args, 0, sizeof(args));
 
        EXPECT_BEGIN(KERN_INFO,
-                    "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+                    "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
 
        rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
                                        "#phandle-cells", 1, &args);
 
        EXPECT_END(KERN_INFO,
-                  "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+                  "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
 
        unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
 
        EXPECT_BEGIN(KERN_INFO,
-                    "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+                    "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
 
        rc = of_count_phandle_with_args(np, "phandle-list-bad-args",
                                        "#phandle-cells");
 
        EXPECT_END(KERN_INFO,
-                  "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+                  "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
 
        unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
 }
@@ -670,12 +670,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
        memset(&args, 0, sizeof(args));
 
        EXPECT_BEGIN(KERN_INFO,
-                    "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+                    "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
 
        rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args",
                                            "phandle", 1, &args);
        EXPECT_END(KERN_INFO,
-                  "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+                  "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
 
        unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
 }
@@ -1257,12 +1257,12 @@ static void __init of_unittest_platform_populate(void)
                unittest(pdev, "device 2 creation failed\n");
 
                EXPECT_BEGIN(KERN_INFO,
-                            "platform testcase-data:testcase-device2: IRQ index 0 not found");
+                            "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found");
 
                irq = platform_get_irq(pdev, 0);
 
                EXPECT_END(KERN_INFO,
-                          "platform testcase-data:testcase-device2: IRQ index 0 not found");
+                          "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found");
 
                unittest(irq < 0 && irq != -EPROBE_DEFER,
                         "device parsing error failed - %d\n", irq);
index 059566f..9be007c 100644 (file)
@@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
        ioc->usg_calls++;
 #endif
 
-       while(sg_dma_len(sglist) && nents--) {
+       while (nents && sg_dma_len(sglist)) {
 
 #ifdef CCIO_COLLECT_STATS
                ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
@@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
                ccio_unmap_page(dev, sg_dma_address(sglist),
                                  sg_dma_len(sglist), direction, 0);
                ++sglist;
+               nents--;
        }
 
        DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
index e60690d..374b919 100644 (file)
@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
-       while (sg_dma_len(sglist) && nents--) {
+       while (nents && sg_dma_len(sglist)) {
 
                sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
                                direction, 0);
@@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
                ioc->usingle_calls--;   /* kluge since call is unmap_sg() */
 #endif
                ++sglist;
+               nents--;
        }
 
        DBG_RUN_SG("%s() DONE (nents %d)\n", __func__,  nents);
index 20ea2ee..ae0bc2f 100644 (file)
@@ -2155,8 +2155,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
                if (!hv_dev)
                        continue;
 
-               if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
-                       set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
+               if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
+                   hv_dev->desc.virtual_numa_node < num_possible_nodes())
+                       /*
+                        * The kernel may boot with some NUMA nodes offline
+                        * (e.g. in a KDUMP kernel) or with NUMA disabled via
+                        * "numa=off". In those cases, adjust the host provided
+                        * NUMA node to a valid NUMA node used by the kernel.
+                        */
+                       set_dev_node(&dev->dev,
+                                    numa_map_to_online_node(
+                                            hv_dev->desc.virtual_numa_node));
 
                put_pcichild(hv_dev);
        }
index 71258ea..f8e82c5 100644 (file)
@@ -1329,7 +1329,8 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
                 * indirectly via kernel emulated PCI bridge driver.
                 */
                mvebu_pcie_setup_hw(port);
-               mvebu_pcie_set_local_dev_nr(port, 0);
+               mvebu_pcie_set_local_dev_nr(port, 1);
+               mvebu_pcie_set_local_bus_nr(port, 0);
        }
 
        pcie->nports = i;
index cc166c6..eb05cce 100644 (file)
@@ -99,11 +99,13 @@ struct vmd_irq {
  * @srcu:      SRCU struct for local synchronization.
  * @count:     number of child IRQs assigned to this vector; used to track
  *             sharing.
+ * @virq:      The underlying VMD Linux interrupt number
  */
 struct vmd_irq_list {
        struct list_head        irq_list;
        struct srcu_struct      srcu;
        unsigned int            count;
+       unsigned int            virq;
 };
 
 struct vmd_dev {
@@ -253,7 +255,6 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
        struct msi_desc *desc = arg->desc;
        struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
        struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
-       unsigned int index, vector;
 
        if (!vmdirq)
                return -ENOMEM;
@@ -261,10 +262,8 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
        INIT_LIST_HEAD(&vmdirq->node);
        vmdirq->irq = vmd_next_irq(vmd, desc);
        vmdirq->virq = virq;
-       index = index_from_irqs(vmd, vmdirq->irq);
-       vector = pci_irq_vector(vmd->dev, index);
 
-       irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
+       irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
                            handle_untracked_irq, vmd, NULL);
        return 0;
 }
@@ -685,7 +684,8 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd)
                        return err;
 
                INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
-               err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+               vmd->irqs[i].virq = pci_irq_vector(dev, i);
+               err = devm_request_irq(&dev->dev, vmd->irqs[i].virq,
                                       vmd_irq, IRQF_NO_THREAD,
                                       vmd->name, &vmd->irqs[i]);
                if (err)
@@ -969,7 +969,7 @@ static int vmd_suspend(struct device *dev)
        int i;
 
        for (i = 0; i < vmd->msix_count; i++)
-               devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+               devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]);
 
        return 0;
 }
@@ -981,7 +981,7 @@ static int vmd_resume(struct device *dev)
        int err, i;
 
        for (i = 0; i < vmd->msix_count; i++) {
-               err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+               err = devm_request_irq(dev, vmd->irqs[i].virq,
                                       vmd_irq, IRQF_NO_THREAD,
                                       vmd->name, &vmd->irqs[i]);
                if (err)
index bda6308..604feeb 100644 (file)
@@ -166,6 +166,9 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
 {
        int ret, i;
 
+       for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+               irqs[i] = -1;
+
        /*
         * If we support PME but can't use MSI/MSI-X for it, we have to
         * fall back to INTx or other interrupts, e.g., a system shared
@@ -314,10 +317,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
  */
 int pcie_port_device_register(struct pci_dev *dev)
 {
-       int status, capabilities, irq_services, i, nr_service;
-       int irqs[PCIE_PORT_DEVICE_MAXSERVICES] = {
-               [0 ... PCIE_PORT_DEVICE_MAXSERVICES-1] = -1
-       };
+       int status, capabilities, i, nr_service;
+       int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
 
        /* Enable PCI Express port device */
        status = pci_enable_device(dev);
@@ -330,32 +331,18 @@ int pcie_port_device_register(struct pci_dev *dev)
                return 0;
 
        pci_set_master(dev);
-
-       irq_services = 0;
-       if (IS_ENABLED(CONFIG_PCIE_PME))
-               irq_services |= PCIE_PORT_SERVICE_PME;
-       if (IS_ENABLED(CONFIG_PCIEAER))
-               irq_services |= PCIE_PORT_SERVICE_AER;
-       if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
-               irq_services |= PCIE_PORT_SERVICE_HP;
-       if (IS_ENABLED(CONFIG_PCIE_DPC))
-               irq_services |= PCIE_PORT_SERVICE_DPC;
-       irq_services &= capabilities;
-
-       if (irq_services) {
-               /*
-                * Initialize service IRQs. Don't use service devices that
-                * require interrupts if there is no way to generate them.
-                * However, some drivers may have a polling mode (e.g.
-                * pciehp_poll_mode) that can be used in the absence of IRQs.
-                * Allow them to determine if that is to be used.
-                */
-               status = pcie_init_service_irqs(dev, irqs, irq_services);
-               if (status) {
-                       irq_services &= PCIE_PORT_SERVICE_HP;
-                       if (!irq_services)
-                               goto error_disable;
-               }
+       /*
+        * Initialize service irqs. Don't use service devices that
+        * require interrupts if there is no way to generate them.
+        * However, some drivers may have a polling mode (e.g. pciehp_poll_mode)
+        * that can be used in the absence of irqs.  Allow them to determine
+        * if that is to be used.
+        */
+       status = pcie_init_service_irqs(dev, irqs, capabilities);
+       if (status) {
+               capabilities &= PCIE_PORT_SERVICE_HP;
+               if (!capabilities)
+                       goto error_disable;
        }
 
        /* Allocate child services if any */
index d2dd6a6..65f7f6b 100644 (file)
@@ -5344,11 +5344,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
  */
 static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
 {
-       if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
-           (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
-           (pdev->device == 0x7341 && pdev->revision != 0x00))
-               return;
-
        if (pdev->device == 0x15d8) {
                if (pdev->revision == 0xcf &&
                    pdev->subsystem_vendor == 0xea50 &&
@@ -5370,10 +5365,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
 /* AMD Iceland dGPU */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
 /* AMD Navi10 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7310, quirk_amd_harvest_no_ats);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7318, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7319, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731a, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731b, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731e, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731f, quirk_amd_harvest_no_ats);
 /* AMD Navi14 dGPU */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
 /* AMD Raven platform iGPU */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
 #endif /* CONFIG_PCI_ATS */
index cd2332b..fdbd64c 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
+#include <linux/bits.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/io.h>
@@ -250,7 +251,7 @@ static int phy_meson_axg_mipi_dphy_power_on(struct phy *phy)
                     (DIV_ROUND_UP(priv->config.clk_zero, temp) << 16) |
                     (DIV_ROUND_UP(priv->config.clk_prepare, temp) << 24));
        regmap_write(priv->regmap, MIPI_DSI_CLK_TIM1,
-                    DIV_ROUND_UP(priv->config.clk_pre, temp));
+                    DIV_ROUND_UP(priv->config.clk_pre, BITS_PER_BYTE));
 
        regmap_write(priv->regmap, MIPI_DSI_HS_TIM,
                     DIV_ROUND_UP(priv->config.hs_exit, temp) |
index f81e237..849c420 100644 (file)
@@ -97,8 +97,7 @@ config PHY_BRCM_USB
        depends on OF
        select GENERIC_PHY
        select SOC_BRCMSTB if ARCH_BRCMSTB
-       default ARCH_BCM4908
-       default ARCH_BRCMSTB
+       default ARCH_BCM4908 || ARCH_BRCMSTB
        help
          Enable this to support the Broadcom STB USB PHY.
          This driver is required by the USB XHCI, EHCI and OHCI
index 116fb23..0f1deb6 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/soc/brcmstb/brcmstb.h>
 #include <dt-bindings/phy/phy.h>
 #include <linux/mfd/syscon.h>
+#include <linux/suspend.h>
 
 #include "phy-brcm-usb-init.h"
 
@@ -70,12 +71,35 @@ struct brcm_usb_phy_data {
        int                     init_count;
        int                     wake_irq;
        struct brcm_usb_phy     phys[BRCM_USB_PHY_ID_MAX];
+       struct notifier_block   pm_notifier;
+       bool                    pm_active;
 };
 
 static s8 *node_reg_names[BRCM_REGS_MAX] = {
        "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
 };
 
+static int brcm_pm_notifier(struct notifier_block *notifier,
+                           unsigned long pm_event,
+                           void *unused)
+{
+       struct brcm_usb_phy_data *priv =
+               container_of(notifier, struct brcm_usb_phy_data, pm_notifier);
+
+       switch (pm_event) {
+       case PM_HIBERNATION_PREPARE:
+       case PM_SUSPEND_PREPARE:
+               priv->pm_active = true;
+               break;
+       case PM_POST_RESTORE:
+       case PM_POST_HIBERNATION:
+       case PM_POST_SUSPEND:
+               priv->pm_active = false;
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
 static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
 {
        struct phy *gphy = dev_id;
@@ -91,6 +115,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
        struct brcm_usb_phy_data *priv =
                container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
 
+       if (priv->pm_active)
+               return 0;
+
        /*
         * Use a lock to make sure a second caller waits until
         * the base phy is inited before using it.
@@ -120,6 +147,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
        struct brcm_usb_phy_data *priv =
                container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
 
+       if (priv->pm_active)
+               return 0;
+
        dev_dbg(&gphy->dev, "EXIT\n");
        if (phy->id == BRCM_USB_PHY_2_0)
                brcm_usb_uninit_eohci(&priv->ini);
@@ -488,6 +518,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
        if (err)
                return err;
 
+       priv->pm_notifier.notifier_call = brcm_pm_notifier;
+       register_pm_notifier(&priv->pm_notifier);
+
        mutex_init(&priv->mutex);
 
        /* make sure invert settings are correct */
@@ -528,7 +561,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
 
 static int brcm_usb_phy_remove(struct platform_device *pdev)
 {
+       struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev);
+
        sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group);
+       unregister_pm_notifier(&priv->pm_notifier);
 
        return 0;
 }
@@ -539,6 +575,7 @@ static int brcm_usb_phy_suspend(struct device *dev)
        struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
 
        if (priv->init_count) {
+               dev_dbg(dev, "SUSPEND\n");
                priv->ini.wake_enabled = device_may_wakeup(dev);
                if (priv->phys[BRCM_USB_PHY_3_0].inited)
                        brcm_usb_uninit_xhci(&priv->ini);
@@ -578,6 +615,7 @@ static int brcm_usb_phy_resume(struct device *dev)
         * Uninitialize anything that wasn't previously initialized.
         */
        if (priv->init_count) {
+               dev_dbg(dev, "RESUME\n");
                if (priv->wake_irq >= 0)
                        disable_irq_wake(priv->wake_irq);
                brcm_usb_init_common(&priv->ini);
index da24acd..e265647 100644 (file)
@@ -1338,7 +1338,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        const struct cdns_sierra_data *data;
        unsigned int id_value;
-       int i, ret, node = 0;
+       int ret, node = 0;
        void __iomem *base;
        struct device_node *dn = dev->of_node, *child;
 
@@ -1416,7 +1416,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
                        dev_err(dev, "failed to get reset %s\n",
                                child->full_name);
                        ret = PTR_ERR(sp->phys[node].lnk_rst);
-                       goto put_child2;
+                       of_node_put(child);
+                       goto put_control;
                }
 
                if (!sp->autoconf) {
@@ -1424,7 +1425,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
                        if (ret) {
                                dev_err(dev, "missing property in node %s\n",
                                        child->name);
-                               goto put_child;
+                               of_node_put(child);
+                               reset_control_put(sp->phys[node].lnk_rst);
+                               goto put_control;
                        }
                }
 
@@ -1434,7 +1437,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 
                if (IS_ERR(gphy)) {
                        ret = PTR_ERR(gphy);
-                       goto put_child;
+                       of_node_put(child);
+                       reset_control_put(sp->phys[node].lnk_rst);
+                       goto put_control;
                }
                sp->phys[node].phy = gphy;
                phy_set_drvdata(gphy, &sp->phys[node]);
@@ -1446,26 +1451,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
        if (sp->num_lanes > SIERRA_MAX_LANES) {
                ret = -EINVAL;
                dev_err(dev, "Invalid lane configuration\n");
-               goto put_child2;
+               goto put_control;
        }
 
        /* If more than one subnode, configure the PHY as multilink */
        if (!sp->autoconf && sp->nsubnodes > 1) {
                ret = cdns_sierra_phy_configure_multilink(sp);
                if (ret)
-                       goto put_child2;
+                       goto put_control;
        }
 
        pm_runtime_enable(dev);
        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-       return PTR_ERR_OR_ZERO(phy_provider);
-
-put_child:
-       node++;
-put_child2:
-       for (i = 0; i < node; i++)
-               reset_control_put(sp->phys[i].lnk_rst);
-       of_node_put(child);
+       if (IS_ERR(phy_provider)) {
+               ret = PTR_ERR(phy_provider);
+               goto put_control;
+       }
+
+       return 0;
+
+put_control:
+       while (--node >= 0)
+               reset_control_put(sp->phys[node].lnk_rst);
 clk_disable:
        cdns_sierra_phy_disable_clocks(sp);
        reset_control_assert(sp->apb_rst);
index 6d30710..8ee7682 100644 (file)
@@ -992,7 +992,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
                /* no efuse, ignore it */
                if (!instance->efuse_intr &&
                    !instance->efuse_rx_imp &&
-                   !instance->efuse_rx_imp) {
+                   !instance->efuse_tx_imp) {
                        dev_warn(dev, "no u3 intr efuse, but dts enable it\n");
                        instance->efuse_sw_en = 0;
                        break;
index 288c9c6..ccb4045 100644 (file)
@@ -36,7 +36,7 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
 
        cfg->clk_miss = 0;
        cfg->clk_post = 60000 + 52 * ui;
-       cfg->clk_pre = 8000;
+       cfg->clk_pre = 8;
        cfg->clk_prepare = 38000;
        cfg->clk_settle = 95000;
        cfg->clk_term_en = 0;
@@ -97,7 +97,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg)
        if (cfg->clk_post < (60000 + 52 * ui))
                return -EINVAL;
 
-       if (cfg->clk_pre < 8000)
+       if (cfg->clk_pre < 8)
                return -EINVAL;
 
        if (cfg->clk_prepare < 38000 || cfg->clk_prepare > 95000)
index 347dc79..630e01b 100644 (file)
@@ -5,6 +5,7 @@
  * Author: Wyon Bi <bivvy.bi@rock-chips.com>
  */
 
+#include <linux/bits.h>
 #include <linux/kernel.h>
 #include <linux/clk.h>
 #include <linux/iopoll.h>
@@ -364,7 +365,7 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
         * The value of counter for HS Tclk-pre
         * Tclk-pre = Tpin_txbyteclkhs * value
         */
-       clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs);
+       clk_pre = DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE);
 
        /*
         * The value of counter for HS Tlpx Time
index 2ce9bfd..007a23c 100644 (file)
@@ -304,7 +304,7 @@ static int stm32_usbphyc_pll_enable(struct stm32_usbphyc *usbphyc)
 
                ret = __stm32_usbphyc_pll_disable(usbphyc);
                if (ret)
-                       return ret;
+                       goto dec_n_pll_cons;
        }
 
        ret = stm32_usbphyc_regulators_enable(usbphyc);
index b3384c3..da546c3 100644 (file)
@@ -233,6 +233,7 @@ static const struct clk_div_table clk_div_table[] = {
        { .val = 1, .div = 2, },
        { .val = 2, .div = 4, },
        { .val = 3, .div = 8, },
+       { /* sentinel */ },
 };
 
 static const struct wiz_clk_div_sel clk_div_sel[] = {
index f478d8a..9be9535 100644 (file)
 #define PROT_BUS_WIDTH_10              0x0
 #define PROT_BUS_WIDTH_20              0x1
 #define PROT_BUS_WIDTH_40              0x2
-#define PROT_BUS_WIDTH_SHIFT           2
+#define PROT_BUS_WIDTH_SHIFT(n)                ((n) * 2)
+#define PROT_BUS_WIDTH_MASK(n)         GENMASK((n) * 2 + 1, (n) * 2)
 
 /* Number of GT lanes */
 #define NUM_LANES                      4
@@ -445,12 +446,12 @@ static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
 static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
 {
        struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+       u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane);
+       u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane);
 
        /* Set SGMII protocol TX and RX bus width to 10 bits. */
-       xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH,
-                    PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
-       xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH,
-                    PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
+       xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val);
+       xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val);
 
        xpsgtr_bypass_scrambler_8b10b(gtr_phy);
 }
index 0bcd195..3ddaeff 100644 (file)
@@ -749,7 +749,6 @@ static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
        { "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
        { "INT34C6", (kernel_ulong_t)&tglh_soc_data },
        { "INTC1055", (kernel_ulong_t)&tgllp_soc_data },
-       { "INTC1057", (kernel_ulong_t)&tgllp_soc_data },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
index 49e3268..ecab6bf 100644 (file)
@@ -482,7 +482,7 @@ static int k210_pinconf_get_drive(unsigned int max_strength_ua)
 {
        int i;
 
-       for (i = K210_PC_DRIVE_MAX; i; i--) {
+       for (i = K210_PC_DRIVE_MAX; i >= 0; i--) {
                if (k210_pinconf_drive_strength[i] <= max_strength_ua)
                        return i;
        }
@@ -527,7 +527,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev,
        case PIN_CONFIG_BIAS_PULL_UP:
                if (!arg)
                        return -EINVAL;
-               val |= K210_PC_PD;
+               val |= K210_PC_PU;
                break;
        case PIN_CONFIG_DRIVE_STRENGTH:
                arg *= 1000;
index 0b91215..266da41 100644 (file)
@@ -1164,6 +1164,7 @@ static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger)
 }
 
 static struct irq_chip starfive_irq_chip = {
+       .name = "StarFive GPIO",
        .irq_ack = starfive_irq_ack,
        .irq_mask = starfive_irq_mask,
        .irq_mask_ack = starfive_irq_mask_ack,
@@ -1308,7 +1309,6 @@ static int starfive_probe(struct platform_device *pdev)
        sfp->gc.ngpio = NR_GPIOS;
 
        starfive_irq_chip.parent_device = dev;
-       starfive_irq_chip.name = sfp->gc.label;
 
        sfp->gc.irq.chip = &starfive_irq_chip;
        sfp->gc.irq.parent_handler = starfive_gpio_irq_handler;
index 80d6750..1f40137 100644 (file)
 #include "../core.h"
 #include "pinctrl-sunxi.h"
 
+/*
+ * These lock classes tell lockdep that GPIO IRQs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key sunxi_pinctrl_irq_lock_class;
+static struct lock_class_key sunxi_pinctrl_irq_request_class;
+
 static struct irq_chip sunxi_pinctrl_edge_irq_chip;
 static struct irq_chip sunxi_pinctrl_level_irq_chip;
 
@@ -837,7 +844,8 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
 {
        struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
 
-       return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, true);
+       return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL,
+                                           chip->base + offset, true);
 }
 
 static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -890,7 +898,8 @@ static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
        struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
 
        sunxi_pinctrl_gpio_set(chip, offset, value);
-       return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, false);
+       return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL,
+                                           chip->base + offset, false);
 }
 
 static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -1555,6 +1564,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
        for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) {
                int irqno = irq_create_mapping(pctl->domain, i);
 
+               irq_set_lockdep_class(irqno, &sunxi_pinctrl_irq_lock_class,
+                                     &sunxi_pinctrl_irq_request_class);
                irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip,
                                         handle_edge_irq);
                irq_set_chip_data(irqno, pctl);
index abac3ee..444ec81 100644 (file)
@@ -232,14 +232,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
        }
        bix->last_full_charg_capacity = ret;
 
-       /* get serial number */
+       /*
+        * Get serial number, on some devices (with unofficial replacement
+        * battery?) reading any of the serial number range addresses gets
+        * nacked in this case just leave the serial number empty.
+        */
        ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
                                            sizeof(buf), buf);
-       if (ret != sizeof(buf)) {
+       if (ret == -EREMOTEIO) {
+               /* no serial number available */
+       } else if (ret != sizeof(buf)) {
                dev_err(&client->dev, "Error reading serial no: %d\n", ret);
                return ret;
+       } else {
+               snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
        }
-       snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
 
        /* get cycle count */
        ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
index 4c72ba6..b1103f8 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
+#include <linux/pm_qos.h>
 #include <linux/rtc.h>
 #include <linux/suspend.h>
 #include <linux/seq_file.h>
@@ -85,6 +86,9 @@
 #define PMC_MSG_DELAY_MIN_US           50
 #define RESPONSE_REGISTER_LOOP_MAX     20000
 
+/* QoS request for letting CPUs in idle states, but not the deepest */
+#define AMD_PMC_MAX_IDLE_STATE_LATENCY 3
+
 #define SOC_SUBSYSTEM_IP_MAX   12
 #define DELAY_MIN_US           2000
 #define DELAY_MAX_US           3000
@@ -131,6 +135,7 @@ struct amd_pmc_dev {
        struct device *dev;
        struct pci_dev *rdev;
        struct mutex lock; /* generic mutex lock */
+       struct pm_qos_request amd_pmc_pm_qos_req;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbgfs_dir;
 #endif /* CONFIG_DEBUG_FS */
@@ -521,6 +526,14 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
        rc = rtc_alarm_irq_enable(rtc_device, 0);
        dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration);
 
+       /*
+        * Prevent CPUs from getting into deep idle states while sending OS_HINT
+        * which is otherwise generally safe to send when at least one of the CPUs
+        * is not in deep idle states.
+        */
+       cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, AMD_PMC_MAX_IDLE_STATE_LATENCY);
+       wake_up_all_idle_cpus();
+
        return rc;
 }
 
@@ -538,24 +551,31 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
        /* Activate CZN specific RTC functionality */
        if (pdev->cpu_id == AMD_CPU_ID_CZN) {
                rc = amd_pmc_verify_czn_rtc(pdev, &arg);
-               if (rc < 0)
-                       return rc;
+               if (rc)
+                       goto fail;
        }
 
        /* Dump the IdleMask before we send hint to SMU */
        amd_pmc_idlemask_read(pdev, dev, NULL);
        msg = amd_pmc_get_os_hint(pdev);
        rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
-       if (rc)
+       if (rc) {
                dev_err(pdev->dev, "suspend failed\n");
+               goto fail;
+       }
 
        if (enable_stb)
                rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF);
-       if (rc) {
+       if (rc) {
                dev_err(pdev->dev, "error writing to STB\n");
-               return rc;
+               goto fail;
        }
 
+       return 0;
+fail:
+       if (pdev->cpu_id == AMD_CPU_ID_CZN)
+               cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req,
+                                               PM_QOS_DEFAULT_VALUE);
        return rc;
 }
 
@@ -579,12 +599,15 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
        /* Write data incremented by 1 to distinguish in stb_read */
        if (enable_stb)
                rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
-       if (rc) {
+       if (rc)
                dev_err(pdev->dev, "error writing to STB\n");
-               return rc;
-       }
 
-       return 0;
+       /* Restore the QoS request back to defaults if it was set */
+       if (pdev->cpu_id == AMD_CPU_ID_CZN)
+               cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req,
+                                               PM_QOS_DEFAULT_VALUE);
+
+       return rc;
 }
 
 static const struct dev_pm_ops amd_pmc_pm_ops = {
@@ -722,6 +745,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
        amd_pmc_get_smu_version(dev);
        platform_set_drvdata(pdev, dev);
        amd_pmc_dbgfs_register(dev);
+       cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE);
        return 0;
 
 err_pci_dev_put:
index a3b83b2..2104a26 100644 (file)
@@ -2223,7 +2223,7 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
 
        err = fan_curve_get_factory_default(asus, fan_dev);
        if (err) {
-               if (err == -ENODEV)
+               if (err == -ENODEV || err == -ENODATA)
                        return 0;
                return err;
        }
index f93d437..525f09a 100644 (file)
@@ -100,7 +100,8 @@ static struct gpiod_lookup_table surface_go_tps68470_gpios = {
        .dev_id = "i2c-INT347A:00",
        .table = {
                GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW),
-               GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW)
+               GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW),
+               { }
        }
 };
 
index bd04548..3424b08 100644 (file)
@@ -8703,6 +8703,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
        TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (4nd gen) */
        TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),  /* P15 (1st gen) / P15v (1st gen) */
        TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL),  /* X1 Carbon (9th gen) */
+       TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL),  /* T15g (2nd gen) */
        TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
 };
 
index b274942..01ad84f 100644 (file)
@@ -1523,6 +1523,9 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
                                                BQ256XX_WDT_BIT_SHIFT);
 
        ret = power_supply_get_battery_info(bq->charger, &bat_info);
+       if (ret == -ENOMEM)
+               return ret;
+
        if (ret) {
                dev_warn(bq->dev, "battery info missing, default values will be applied\n");
 
index 0c87ad0..728e2a6 100644 (file)
@@ -689,7 +689,7 @@ static int cw_bat_probe(struct i2c_client *client)
        if (ret) {
                /* Allocate an empty battery */
                cw_bat->battery = devm_kzalloc(&client->dev,
-                                              sizeof(cw_bat->battery),
+                                              sizeof(*cw_bat->battery),
                                               GFP_KERNEL);
                if (!cw_bat->battery)
                        return -ENOMEM;
index 0f1b5a7..17ad5f0 100644 (file)
@@ -607,7 +607,7 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts)
 }
 
 static void
-__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
+__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
 {
        u32 select, ctrl;
 
@@ -615,7 +615,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
        iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
 
        iowrite32(adj_val, &bp->reg->offset_ns);
-       iowrite32(adj_val & 0x7f, &bp->reg->offset_window_ns);
+       iowrite32(NSEC_PER_SEC, &bp->reg->offset_window_ns);
 
        ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE;
        iowrite32(ctrl, &bp->reg->ctrl);
@@ -624,6 +624,22 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
        iowrite32(select >> 16, &bp->reg->select);
 }
 
+static void
+ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
+{
+       struct timespec64 ts;
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&bp->lock, flags);
+       err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
+       if (likely(!err)) {
+               timespec64_add_ns(&ts, delta_ns);
+               __ptp_ocp_settime_locked(bp, &ts);
+       }
+       spin_unlock_irqrestore(&bp->lock, flags);
+}
+
 static int
 ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
 {
@@ -631,6 +647,11 @@ ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
        unsigned long flags;
        u32 adj_ns, sign;
 
+       if (delta_ns > NSEC_PER_SEC || -delta_ns > NSEC_PER_SEC) {
+               ptp_ocp_adjtime_coarse(bp, delta_ns);
+               return 0;
+       }
+
        sign = delta_ns < 0 ? BIT(31) : 0;
        adj_ns = sign ? -delta_ns : delta_ns;
 
index 86aa414..d255397 100644 (file)
@@ -6014,9 +6014,8 @@ core_initcall(regulator_init);
 static int regulator_late_cleanup(struct device *dev, void *data)
 {
        struct regulator_dev *rdev = dev_to_rdev(dev);
-       const struct regulator_ops *ops = rdev->desc->ops;
        struct regulation_constraints *c = rdev->constraints;
-       int enabled, ret;
+       int ret;
 
        if (c && c->always_on)
                return 0;
@@ -6029,14 +6028,8 @@ static int regulator_late_cleanup(struct device *dev, void *data)
        if (rdev->use_count)
                goto unlock;
 
-       /* If we can't read the status assume it's always on. */
-       if (ops->is_enabled)
-               enabled = ops->is_enabled(rdev);
-       else
-               enabled = 1;
-
-       /* But if reading the status failed, assume that it's off. */
-       if (enabled <= 0)
+       /* If reading the status failed, assume that it's off. */
+       if (_regulator_is_enabled(rdev) <= 0)
                goto unlock;
 
        if (have_full_constraints()) {
index 6f21223..eb9df48 100644 (file)
@@ -87,16 +87,16 @@ static struct da9121_range da9121_3A_1phase_current = {
 };
 
 static struct da9121_range da914x_40A_4phase_current = {
-       .val_min = 14000000,
-       .val_max = 80000000,
-       .val_stp =  2000000,
+       .val_min = 26000000,
+       .val_max = 78000000,
+       .val_stp =  4000000,
        .reg_min = 1,
        .reg_max = 14,
 };
 
 static struct da9121_range da914x_20A_2phase_current = {
-       .val_min =  7000000,
-       .val_max = 40000000,
+       .val_min = 13000000,
+       .val_max = 39000000,
        .val_stp =  2000000,
        .reg_min = 1,
        .reg_max = 14,
@@ -561,7 +561,7 @@ static const struct regulator_desc da9217_reg = {
 };
 
 #define DA914X_MIN_MV          500
-#define DA914X_MAX_MV          1000
+#define DA914X_MAX_MV          1300
 #define DA914X_STEP_MV         10
 #define DA914X_MIN_SEL         (DA914X_MIN_MV / DA914X_STEP_MV)
 #define DA914X_N_VOLTAGES      (((DA914X_MAX_MV - DA914X_MIN_MV) / DA914X_STEP_MV) \
@@ -585,10 +585,6 @@ static const struct regulator_desc da9141_reg = {
        .vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
        .enable_reg = DA9121_REG_BUCK_BUCK1_0,
        .enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
-       /* Default value of BUCK_BUCK1_0.CH1_SRC_DVC_UP */
-       .ramp_delay = 20000,
-       /* tBUCK_EN */
-       .enable_time = 20,
 };
 
 static const struct regulator_desc da9142_reg = {
index cd938a2..3b1cd0c 100644 (file)
@@ -1180,7 +1180,7 @@ static int io_subchannel_chp_event(struct subchannel *sch,
                        else
                                path_event[chpid] = PE_NONE;
                }
-               if (cdev)
+               if (cdev && cdev->drv && cdev->drv->path_event)
                        cdev->drv->path_event(cdev, path_event);
                break;
        }
index 4878c94..98cabe0 100644 (file)
@@ -592,6 +592,7 @@ struct lpfc_vport {
 #define FC_VPORT_LOGO_RCVD      0x200    /* LOGO received on vport */
 #define FC_RSCN_DISCOVERY       0x400   /* Auth all devices after RSCN */
 #define FC_LOGO_RCVD_DID_CHNG   0x800    /* FDISC on phys port detect DID chng*/
+#define FC_PT2PT_NO_NVME        0x1000   /* Don't send NVME PRLI */
 #define FC_SCSI_SCAN_TMO        0x4000  /* scsi scan timer running */
 #define FC_ABORT_DISCOVERY      0x8000  /* we want to abort discovery */
 #define FC_NDISC_ACTIVE         0x10000         /* NPort discovery active */
@@ -1161,6 +1162,16 @@ struct lpfc_hba {
        uint32_t cfg_hostmem_hgp;
        uint32_t cfg_log_verbose;
        uint32_t cfg_enable_fc4_type;
+#define LPFC_ENABLE_FCP  1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+#if (IS_ENABLED(CONFIG_NVME_FC))
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#else
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#endif
        uint32_t cfg_aer_support;
        uint32_t cfg_sriov_nr_virtfn;
        uint32_t cfg_request_firmware_upgrade;
@@ -1182,9 +1193,6 @@ struct lpfc_hba {
        uint32_t cfg_ras_fwlog_func;
        uint32_t cfg_enable_bbcr;       /* Enable BB Credit Recovery */
        uint32_t cfg_enable_dpp;        /* Enable Direct Packet Push */
-#define LPFC_ENABLE_FCP  1
-#define LPFC_ENABLE_NVME 2
-#define LPFC_ENABLE_BOTH 3
        uint32_t cfg_enable_pbde;
        uint32_t cfg_enable_mi;
        struct nvmet_fc_target_port *targetport;
index 7a7f17d..fa84152 100644 (file)
@@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
        pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
        pmboxq->u.mb.mbxOwner = OWN_HOST;
 
+       if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
+               vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+
        mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
 
        if ((mbxstatus == MBX_SUCCESS) &&
@@ -3978,8 +3981,8 @@ LPFC_ATTR_R(nvmet_mrq_post,
  *                    3 - register both FCP and NVME
  * Supported values are [1,3]. Default value is 3
  */
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
-           LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+           LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
            "Enable FC4 Protocol support - FCP / NVME");
 
 /*
index db5ccae..f936833 100644 (file)
@@ -1072,7 +1072,8 @@ stop_rr_fcf_flogi:
 
                /* FLOGI failed, so there is no fabric */
                spin_lock_irq(shost->host_lock);
-               vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+               vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
+                                   FC_PT2PT_NO_NVME);
                spin_unlock_irq(shost->host_lock);
 
                /* If private loop, then allow max outstanding els to be
@@ -4607,6 +4608,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                /* Added for Vendor specifc support
                 * Just keep retrying for these Rsn / Exp codes
                 */
+               if ((vport->fc_flag & FC_PT2PT) &&
+                   cmd == ELS_CMD_NVMEPRLI) {
+                       switch (stat.un.b.lsRjtRsnCode) {
+                       case LSRJT_UNABLE_TPC:
+                       case LSRJT_INVALID_CMD:
+                       case LSRJT_LOGICAL_ERR:
+                       case LSRJT_CMD_UNSUPPORTED:
+                               lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+                                                "0168 NVME PRLI LS_RJT "
+                                                "reason %x port doesn't "
+                                                "support NVME, disabling NVME\n",
+                                                stat.un.b.lsRjtRsnCode);
+                               retry = 0;
+                               vport->fc_flag |= FC_PT2PT_NO_NVME;
+                               goto out_retry;
+                       }
+               }
                switch (stat.un.b.lsRjtRsnCode) {
                case LSRJT_UNABLE_TPC:
                        /* The driver has a VALID PLOGI but the rport has
index a56f01f..558f7d2 100644 (file)
@@ -2104,7 +2104,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
                }
                if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
                    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "3143 Port Down: Firmware Update "
                                        "Detected\n");
                        en_rn_msg = false;
index 7d717a4..fdf5e77 100644 (file)
@@ -1961,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
                         * is configured try it.
                         */
                        ndlp->nlp_fc4_type |= NLP_FC4_FCP;
-                       if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-                           (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+                       if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+                           (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+                           vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
                                ndlp->nlp_fc4_type |= NLP_FC4_NVME;
                                /* We need to update the localport also */
                                lpfc_nvme_update_localport(vport);
index 1bc0db5..430abeb 100644 (file)
@@ -13363,6 +13363,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
        uint32_t uerr_sta_hi, uerr_sta_lo;
        uint32_t if_type, portsmphr;
        struct lpfc_register portstat_reg;
+       u32 logmask;
 
        /*
         * For now, use the SLI4 device internal unrecoverable error
@@ -13413,7 +13414,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
                                readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
                        phba->work_status[1] =
                                readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
-                       lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+                       logmask = LOG_TRACE_EVENT;
+                       if (phba->work_status[0] ==
+                               SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                           phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
+                               logmask = LOG_SLI;
+                       lpfc_printf_log(phba, KERN_ERR, logmask,
                                        "2885 Port Status Event: "
                                        "port status reg 0x%x, "
                                        "port smphr reg 0x%x, "
index 5916ed7..4eb89aa 100644 (file)
@@ -771,11 +771,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
                        qedi_cmd->list_tmf_work = NULL;
                }
        }
+       spin_unlock_bh(&qedi_conn->tmf_work_lock);
 
-       if (!found) {
-               spin_unlock_bh(&qedi_conn->tmf_work_lock);
+       if (!found)
                goto check_cleanup_reqs;
-       }
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                  "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
@@ -806,7 +805,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
        qedi_cmd->state = CLEANUP_RECV;
 unlock:
        spin_unlock_bh(&conn->session->back_lock);
-       spin_unlock_bh(&qedi_conn->tmf_work_lock);
        wake_up_interruptible(&qedi_conn->wait_queue);
        return;
 
index 50b12d6..9349557 100644 (file)
@@ -2681,7 +2681,7 @@ static int ufshcd_map_queues(struct Scsi_Host *shost)
                        break;
                case HCTX_TYPE_READ:
                        map->nr_queues = 0;
-                       break;
+                       continue;
                default:
                        WARN_ON_ONCE(true);
                }
index 72771e0..258894e 100644 (file)
@@ -306,10 +306,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
        }
 
        lpc_ctrl->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(lpc_ctrl->clk)) {
-               dev_err(dev, "couldn't get clock\n");
-               return PTR_ERR(lpc_ctrl->clk);
-       }
+       if (IS_ERR(lpc_ctrl->clk))
+               return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk),
+                                    "couldn't get clock\n");
        rc = clk_prepare_enable(lpc_ctrl->clk);
        if (rc) {
                dev_err(dev, "couldn't enable clock\n");
index 072473a..5ed2fc1 100644 (file)
@@ -28,7 +28,6 @@ struct fsl_soc_die_attr {
 static struct guts *guts;
 static struct soc_device_attribute soc_dev_attr;
 static struct soc_device *soc_dev;
-static struct device_node *root;
 
 
 /* SoC die attribute definition for QorIQ platform */
@@ -138,7 +137,7 @@ static u32 fsl_guts_get_svr(void)
 
 static int fsl_guts_probe(struct platform_device *pdev)
 {
-       struct device_node *np = pdev->dev.of_node;
+       struct device_node *root, *np = pdev->dev.of_node;
        struct device *dev = &pdev->dev;
        const struct fsl_soc_die_attr *soc_die;
        const char *machine;
@@ -159,8 +158,14 @@ static int fsl_guts_probe(struct platform_device *pdev)
        root = of_find_node_by_path("/");
        if (of_property_read_string(root, "model", &machine))
                of_property_read_string_index(root, "compatible", 0, &machine);
-       if (machine)
-               soc_dev_attr.machine = machine;
+       if (machine) {
+               soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+               if (!soc_dev_attr.machine) {
+                       of_node_put(root);
+                       return -ENOMEM;
+               }
+       }
+       of_node_put(root);
 
        svr = fsl_guts_get_svr();
        soc_die = fsl_soc_die_match(svr, fsl_soc_die);
@@ -195,7 +200,6 @@ static int fsl_guts_probe(struct platform_device *pdev)
 static int fsl_guts_remove(struct platform_device *dev)
 {
        soc_device_unregister(soc_dev);
-       of_node_put(root);
        return 0;
 }
 
index 4d38c80..b3c226e 100644 (file)
@@ -147,7 +147,7 @@ EXPORT_SYMBOL(qe_issue_cmd);
  * memory mapped space.
  * The BRG clock is the QE clock divided by 2.
  * It was set up long ago during the initial boot phase and is
- * is given to us.
+ * given to us.
  * Baud rate clocks are zero-based in the driver code (as that maps
  * to port numbers). Documentation uses 1-based numbering.
  */
@@ -421,7 +421,7 @@ static void qe_upload_microcode(const void *base,
 
        for (i = 0; i < be32_to_cpu(ucode->count); i++)
                iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
-       
+
        /* Set I-RAM Ready Register */
        iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
 }
index e277c82..a5e2d0e 100644 (file)
@@ -35,6 +35,8 @@ int par_io_init(struct device_node *np)
        if (ret)
                return ret;
        par_io = ioremap(res.start, resource_size(&res));
+       if (!par_io)
+               return -ENOMEM;
 
        if (!of_property_read_u32(np, "num-ports", &num_ports))
                num_par_io_ports = num_ports;
index 3e59d47..3cb1230 100644 (file)
@@ -382,7 +382,8 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
        return 0;
 
 out_clk_disable:
-       clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+       if (!domain->keep_clocks)
+               clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
 
        return ret;
 }
index 670cc82..ca75b14 100644 (file)
@@ -411,17 +411,12 @@ out:
        return ret;
 }
 
-static int init_clks(struct platform_device *pdev, struct clk **clk)
+static void init_clks(struct platform_device *pdev, struct clk **clk)
 {
        int i;
 
-       for (i = CLK_NONE + 1; i < CLK_MAX; i++) {
+       for (i = CLK_NONE + 1; i < CLK_MAX; i++)
                clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
-               if (IS_ERR(clk[i]))
-                       return PTR_ERR(clk[i]);
-       }
-
-       return 0;
 }
 
 static struct scp *init_scp(struct platform_device *pdev,
@@ -431,7 +426,7 @@ static struct scp *init_scp(struct platform_device *pdev,
 {
        struct genpd_onecell_data *pd_data;
        struct resource *res;
-       int i, j, ret;
+       int i, j;
        struct scp *scp;
        struct clk *clk[CLK_MAX];
 
@@ -486,9 +481,7 @@ static struct scp *init_scp(struct platform_device *pdev,
 
        pd_data->num_domains = num;
 
-       ret = init_clks(pdev, clk);
-       if (ret)
-               return ERR_PTR(ret);
+       init_clks(pdev, clk);
 
        for (i = 0; i < num; i++) {
                struct scp_domain *scpd = &scp->domains[i];
index a9f8b22..02e3195 100644 (file)
@@ -31,7 +31,7 @@ config EXYNOS_USI
        help
          Enable support for USI block. USI (Universal Serial Interface) is an
          IP-core found in modern Samsung Exynos SoCs, like Exynos850 and
-         ExynosAutoV0. USI block can be configured to provide one of the
+         ExynosAutoV9. USI block can be configured to provide one of the
          following serial protocols: UART, SPI or High Speed I2C.
 
          This driver allows one to configure USI for desired protocol, which
index 553b6b9..c6a1bb0 100644 (file)
@@ -585,6 +585,12 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
 {
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
 
+       if (atomic_read(&rs->state) & RXDMA)
+               dmaengine_terminate_sync(ctlr->dma_rx);
+       if (atomic_read(&rs->state) & TXDMA)
+               dmaengine_terminate_sync(ctlr->dma_tx);
+       atomic_set(&rs->state, 0);
+       spi_enable_chip(rs, false);
        rs->slave_abort = true;
        spi_finalize_current_transfer(ctlr);
 
@@ -654,7 +660,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
        struct spi_controller *ctlr;
        struct resource *mem;
        struct device_node *np = pdev->dev.of_node;
-       u32 rsd_nsecs;
+       u32 rsd_nsecs, num_cs;
        bool slave_mode;
 
        slave_mode = of_property_read_bool(np, "spi-slave");
@@ -764,8 +770,9 @@ static int rockchip_spi_probe(struct platform_device *pdev)
                 * rk spi0 has two native cs, spi1..5 one cs only
                 * if num-cs is missing in the dts, default to 1
                 */
-               if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect))
-                       ctlr->num_chipselect = 1;
+               if (of_property_read_u32(np, "num-cs", &num_cs))
+                       num_cs = 1;
+               ctlr->num_chipselect = num_cs;
                ctlr->use_gpio_descriptors = true;
        }
        ctlr->dev.of_node = pdev->dev.of_node;
index cfa222c..78f31b6 100644 (file)
@@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
 
        if (op->dummy.nbytes) {
                tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
+               if (!tmpbuf)
+                       return -ENOMEM;
+
                memset(tmpbuf, 0xff, op->dummy.nbytes);
                reinit_completion(&xqspi->data_completion);
                xqspi->txbuf = tmpbuf;
index abe9395..861a154 100644 (file)
@@ -144,6 +144,8 @@ static int init_display(struct fbtft_par *par)
 {
        int rc;
 
+       par->fbtftops.reset(par);
+
        rc = init_tearing_effect_line(par);
        if (rc)
                return rc;
index 4cdec34..55677ef 100644 (file)
@@ -334,7 +334,10 @@ static int __init fbtft_driver_module_init(void)                           \
        ret = spi_register_driver(&fbtft_driver_spi_driver);               \
        if (ret < 0)                                                       \
                return ret;                                                \
-       return platform_driver_register(&fbtft_driver_platform_driver);    \
+       ret = platform_driver_register(&fbtft_driver_platform_driver);     \
+       if (ret < 0)                                                       \
+               spi_unregister_driver(&fbtft_driver_spi_driver);           \
+       return ret;                                                        \
 }                                                                          \
                                                                           \
 static void __exit fbtft_driver_module_exit(void)                          \
index 6759a62..3a2e458 100644 (file)
@@ -1058,15 +1058,27 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
 
        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
 
+       rcu_read_lock();
        service = handle_to_service(handle);
-       if (WARN_ON(!service))
+       if (WARN_ON(!service)) {
+               rcu_read_unlock();
                return VCHIQ_SUCCESS;
+       }
 
        user_service = (struct user_service *)service->base.userdata;
        instance = user_service->instance;
 
-       if (!instance || instance->closing)
+       if (!instance || instance->closing) {
+               rcu_read_unlock();
                return VCHIQ_SUCCESS;
+       }
+
+       /*
+        * As hopping around different synchronization mechanism,
+        * taking an extra reference results in simpler implementation.
+        */
+       vchiq_service_get(service);
+       rcu_read_unlock();
 
        vchiq_log_trace(vchiq_arm_log_level,
                        "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
@@ -1097,6 +1109,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
                                                        bulk_userdata);
                                if (status != VCHIQ_SUCCESS) {
                                        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+                                       vchiq_service_put(service);
                                        return status;
                                }
                        }
@@ -1105,10 +1118,12 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
                        if (wait_for_completion_interruptible(&user_service->remove_event)) {
                                vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
                                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+                               vchiq_service_put(service);
                                return VCHIQ_RETRY;
                        } else if (instance->closing) {
                                vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
                                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+                               vchiq_service_put(service);
                                return VCHIQ_ERROR;
                        }
                        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -1137,6 +1152,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
                header = NULL;
        }
        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+       vchiq_service_put(service);
 
        if (skip_completion)
                return VCHIQ_SUCCESS;
index 1ca3208..17a6f51 100644 (file)
@@ -158,6 +158,7 @@ void optee_remove_common(struct optee *optee)
        optee_unregister_devices();
 
        optee_notif_uninit(optee);
+       teedev_close_context(optee->ctx);
        /*
         * The two devices have to be unregistered before we can free the
         * other resources.
index 20a1b1a..f744ab1 100644 (file)
@@ -424,6 +424,7 @@ static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void)
  */
 
 static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+                                             struct optee *optee,
                                              struct optee_msg_arg *arg)
 {
        struct tee_shm *shm;
@@ -439,7 +440,7 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
                shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
                break;
        case OPTEE_RPC_SHM_TYPE_KERNEL:
-               shm = tee_shm_alloc(ctx, arg->params[0].u.value.b,
+               shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b,
                                    TEE_SHM_MAPPED | TEE_SHM_PRIV);
                break;
        default:
@@ -493,14 +494,13 @@ err_bad_param:
 }
 
 static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
+                                   struct optee *optee,
                                    struct optee_msg_arg *arg)
 {
-       struct optee *optee = tee_get_drvdata(ctx->teedev);
-
        arg->ret_origin = TEEC_ORIGIN_COMMS;
        switch (arg->cmd) {
        case OPTEE_RPC_CMD_SHM_ALLOC:
-               handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg);
+               handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
                break;
        case OPTEE_RPC_CMD_SHM_FREE:
                handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
@@ -510,12 +510,12 @@ static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
        }
 }
 
-static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd,
-                                struct optee_msg_arg *arg)
+static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
+                                u32 cmd, struct optee_msg_arg *arg)
 {
        switch (cmd) {
        case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
-               handle_ffa_rpc_func_cmd(ctx, arg);
+               handle_ffa_rpc_func_cmd(ctx, optee, arg);
                break;
        case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
                /* Interrupt delivered by now */
@@ -582,7 +582,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
                 * above.
                 */
                cond_resched();
-               optee_handle_ffa_rpc(ctx, data->data1, rpc_arg);
+               optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
                cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
                data->data0 = cmd;
                data->data1 = 0;
@@ -619,9 +619,18 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
                .data2 = (u32)(shm->sec_world_id >> 32),
                .data3 = shm->offset,
        };
-       struct optee_msg_arg *arg = tee_shm_get_va(shm, 0);
-       unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
-       struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+       struct optee_msg_arg *arg;
+       unsigned int rpc_arg_offs;
+       struct optee_msg_arg *rpc_arg;
+
+       arg = tee_shm_get_va(shm, 0);
+       if (IS_ERR(arg))
+               return PTR_ERR(arg);
+
+       rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+       rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+       if (IS_ERR(rpc_arg))
+               return PTR_ERR(rpc_arg);
 
        return optee_ffa_yielding_call(ctx, &data, rpc_arg);
 }
@@ -793,7 +802,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
 {
        const struct ffa_dev_ops *ffa_ops;
        unsigned int rpc_arg_count;
+       struct tee_shm_pool *pool;
        struct tee_device *teedev;
+       struct tee_context *ctx;
        struct optee *optee;
        int rc;
 
@@ -813,12 +824,12 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
        if (!optee)
                return -ENOMEM;
 
-       optee->pool = optee_ffa_config_dyn_shm();
-       if (IS_ERR(optee->pool)) {
-               rc = PTR_ERR(optee->pool);
-               optee->pool = NULL;
-               goto err;
+       pool = optee_ffa_config_dyn_shm();
+       if (IS_ERR(pool)) {
+               rc = PTR_ERR(pool);
+               goto err_free_optee;
        }
+       optee->pool = pool;
 
        optee->ops = &optee_ffa_ops;
        optee->ffa.ffa_dev = ffa_dev;
@@ -829,7 +840,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
                                  optee);
        if (IS_ERR(teedev)) {
                rc = PTR_ERR(teedev);
-               goto err;
+               goto err_free_pool;
        }
        optee->teedev = teedev;
 
@@ -837,50 +848,59 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
                                  optee);
        if (IS_ERR(teedev)) {
                rc = PTR_ERR(teedev);
-               goto err;
+               goto err_unreg_teedev;
        }
        optee->supp_teedev = teedev;
 
        rc = tee_device_register(optee->teedev);
        if (rc)
-               goto err;
+               goto err_unreg_supp_teedev;
 
        rc = tee_device_register(optee->supp_teedev);
        if (rc)
-               goto err;
+               goto err_unreg_supp_teedev;
 
        rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
        if (rc)
-               goto err;
+               goto err_unreg_supp_teedev;
        mutex_init(&optee->ffa.mutex);
        mutex_init(&optee->call_queue.mutex);
        INIT_LIST_HEAD(&optee->call_queue.waiters);
        optee_supp_init(&optee->supp);
        ffa_dev_set_drvdata(ffa_dev, optee);
-       rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
-       if (rc) {
-               optee_ffa_remove(ffa_dev);
-               return rc;
+       ctx = teedev_open(optee->teedev);
+       if (IS_ERR(ctx)) {
+               rc = PTR_ERR(ctx);
+               goto err_rhashtable_free;
        }
+       optee->ctx = ctx;
+       rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
+       if (rc)
+               goto err_close_ctx;
 
        rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
-       if (rc) {
-               optee_ffa_remove(ffa_dev);
-               return rc;
-       }
+       if (rc)
+               goto err_unregister_devices;
 
        pr_info("initialized driver\n");
        return 0;
-err:
-       /*
-        * tee_device_unregister() is safe to call even if the
-        * devices hasn't been registered with
-        * tee_device_register() yet.
-        */
+
+err_unregister_devices:
+       optee_unregister_devices();
+       optee_notif_uninit(optee);
+err_close_ctx:
+       teedev_close_context(ctx);
+err_rhashtable_free:
+       rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+       optee_supp_uninit(&optee->supp);
+       mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
        tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
        tee_device_unregister(optee->teedev);
-       if (optee->pool)
-               tee_shm_pool_free(optee->pool);
+err_free_pool:
+       tee_shm_pool_free(pool);
+err_free_optee:
        kfree(optee);
        return rc;
 }
index a28fa03..0521284 100644 (file)
@@ -121,5 +121,5 @@ int optee_notif_init(struct optee *optee, u_int max_key)
 
 void optee_notif_uninit(struct optee *optee)
 {
-       kfree(optee->notif.bitmap);
+       bitmap_free(optee->notif.bitmap);
 }
index 46f74ab..92bc47b 100644 (file)
@@ -53,7 +53,6 @@ struct optee_call_queue {
 
 struct optee_notif {
        u_int max_key;
-       struct tee_context *ctx;
        /* Serializes access to the elements below in this struct */
        spinlock_t lock;
        struct list_head db;
@@ -134,9 +133,10 @@ struct optee_ops {
 /**
  * struct optee - main service struct
  * @supp_teedev:       supplicant device
+ * @teedev:            client device
  * @ops:               internal callbacks for different ways to reach secure
  *                     world
- * @teedev:            client device
+ * @ctx:               driver internal TEE context
  * @smc:               specific to SMC ABI
  * @ffa:               specific to FF-A ABI
  * @call_queue:                queue of threads waiting to call @invoke_fn
@@ -152,6 +152,7 @@ struct optee {
        struct tee_device *supp_teedev;
        struct tee_device *teedev;
        const struct optee_ops *ops;
+       struct tee_context *ctx;
        union {
                struct optee_smc smc;
                struct optee_ffa ffa;
index 449d6a7..c517d31 100644 (file)
@@ -75,16 +75,6 @@ static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
        p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
        p->u.memref.shm = shm;
 
-       /* Check that the memref is covered by the shm object */
-       if (p->u.memref.size) {
-               size_t o = p->u.memref.shm_offs +
-                          p->u.memref.size - 1;
-
-               rc = tee_shm_get_pa(shm, o, NULL);
-               if (rc)
-                       return rc;
-       }
-
        return 0;
 }
 
@@ -622,6 +612,7 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
 }
 
 static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+                                         struct optee *optee,
                                          struct optee_msg_arg *arg,
                                          struct optee_call_ctx *call_ctx)
 {
@@ -651,7 +642,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
                shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
                break;
        case OPTEE_RPC_SHM_TYPE_KERNEL:
-               shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+               shm = tee_shm_alloc(optee->ctx, sz,
+                                   TEE_SHM_MAPPED | TEE_SHM_PRIV);
                break;
        default:
                arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -747,7 +739,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
        switch (arg->cmd) {
        case OPTEE_RPC_CMD_SHM_ALLOC:
                free_pages_list(call_ctx);
-               handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
+               handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
                break;
        case OPTEE_RPC_CMD_SHM_FREE:
                handle_rpc_func_cmd_shm_free(ctx, arg);
@@ -776,7 +768,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
 
        switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
        case OPTEE_SMC_RPC_FUNC_ALLOC:
-               shm = tee_shm_alloc(ctx, param->a1,
+               shm = tee_shm_alloc(optee->ctx, param->a1,
                                    TEE_SHM_MAPPED | TEE_SHM_PRIV);
                if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
                        reg_pair_from_64(&param->a1, &param->a2, pa);
@@ -954,57 +946,34 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
 {
        struct optee *optee = dev_id;
 
-       optee_smc_do_bottom_half(optee->notif.ctx);
+       optee_smc_do_bottom_half(optee->ctx);
 
        return IRQ_HANDLED;
 }
 
 static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
 {
-       struct tee_context *ctx;
        int rc;
 
-       ctx = teedev_open(optee->teedev);
-       if (IS_ERR(ctx))
-               return PTR_ERR(ctx);
-
-       optee->notif.ctx = ctx;
        rc = request_threaded_irq(irq, notif_irq_handler,
                                  notif_irq_thread_fn,
                                  0, "optee_notification", optee);
        if (rc)
-               goto err_close_ctx;
+               return rc;
 
        optee->smc.notif_irq = irq;
 
        return 0;
-
-err_close_ctx:
-       teedev_close_context(optee->notif.ctx);
-       optee->notif.ctx = NULL;
-
-       return rc;
 }
 
 static void optee_smc_notif_uninit_irq(struct optee *optee)
 {
-       if (optee->notif.ctx) {
-               optee_smc_stop_async_notif(optee->notif.ctx);
+       if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+               optee_smc_stop_async_notif(optee->ctx);
                if (optee->smc.notif_irq) {
                        free_irq(optee->smc.notif_irq, optee);
                        irq_dispose_mapping(optee->smc.notif_irq);
                }
-
-               /*
-                * The thread normally working with optee->notif.ctx was
-                * stopped with free_irq() above.
-                *
-                * Note we're not using teedev_close_context() or
-                * tee_client_close_context() since we have already called
-                * tee_device_put() while initializing to avoid a circular
-                * reference counting.
-                */
-               teedev_close_context(optee->notif.ctx);
        }
 }
 
@@ -1366,6 +1335,7 @@ static int optee_probe(struct platform_device *pdev)
        struct optee *optee = NULL;
        void *memremaped_shm = NULL;
        struct tee_device *teedev;
+       struct tee_context *ctx;
        u32 max_notif_value;
        u32 sec_caps;
        int rc;
@@ -1446,9 +1416,15 @@ static int optee_probe(struct platform_device *pdev)
        optee->pool = pool;
 
        platform_set_drvdata(pdev, optee);
+       ctx = teedev_open(optee->teedev);
+       if (IS_ERR(ctx)) {
+               rc = PTR_ERR(ctx);
+               goto err_supp_uninit;
+       }
+       optee->ctx = ctx;
        rc = optee_notif_init(optee, max_notif_value);
        if (rc)
-               goto err_supp_uninit;
+               goto err_close_ctx;
 
        if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
                unsigned int irq;
@@ -1496,6 +1472,8 @@ err_disable_shm_cache:
        optee_unregister_devices();
 err_notif_uninit:
        optee_notif_uninit(optee);
+err_close_ctx:
+       teedev_close_context(ctx);
 err_supp_uninit:
        optee_supp_uninit(&optee->supp);
        mutex_destroy(&optee->call_queue.mutex);
index 72acb1f..4f47881 100644 (file)
@@ -404,6 +404,10 @@ static void int3400_notify(acpi_handle handle,
        thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event);
        thermal_prop[4] = NULL;
        kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop);
+       kfree(thermal_prop[0]);
+       kfree(thermal_prop[1]);
+       kfree(thermal_prop[2]);
+       kfree(thermal_prop[3]);
 }
 
 static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
index a16dd4d..73e68cc 100644 (file)
@@ -419,11 +419,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
        for (i = 0; i < tz->trips; i++) {
 
                enum thermal_trip_type type;
-               int temp, hyst;
+               int temp, hyst = 0;
 
                tz->ops->get_trip_type(tz, i, &type);
                tz->ops->get_trip_temp(tz, i, &temp);
-               tz->ops->get_trip_hyst(tz, i, &hyst);
+               if (tz->ops->get_trip_hyst)
+                       tz->ops->get_trip_hyst(tz, i, &hyst);
 
                if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) ||
                    nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) ||
index 0b1808e..fa92f72 100644 (file)
@@ -439,7 +439,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
                modembits |= MDM_RTR;
        if (dlci->modem_tx & TIOCM_RI)
                modembits |= MDM_IC;
-       if (dlci->modem_tx & TIOCM_CD)
+       if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
                modembits |= MDM_DV;
        return modembits;
 }
@@ -448,7 +448,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
  *     gsm_print_packet        -       display a frame for debug
  *     @hdr: header to print before decode
  *     @addr: address EA from the frame
- *     @cr: C/R bit from the frame
+ *     @cr: C/R bit seen as initiator
  *     @control: control including PF bit
  *     @data: following data bytes
  *     @dlen: length of data
@@ -548,7 +548,7 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
  *     gsm_send        -       send a control frame
  *     @gsm: our GSM mux
  *     @addr: address for control frame
- *     @cr: command/response bit
+ *     @cr: command/response bit seen as initiator
  *     @control:  control byte including PF bit
  *
  *     Format up and transmit a control frame. These do not go via the
@@ -563,11 +563,15 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
        int len;
        u8 cbuf[10];
        u8 ibuf[3];
+       int ocr;
+
+       /* toggle C/R coding if not initiator */
+       ocr = cr ^ (gsm->initiator ? 0 : 1);
 
        switch (gsm->encoding) {
        case 0:
                cbuf[0] = GSM0_SOF;
-               cbuf[1] = (addr << 2) | (cr << 1) | EA;
+               cbuf[1] = (addr << 2) | (ocr << 1) | EA;
                cbuf[2] = control;
                cbuf[3] = EA;   /* Length of data = 0 */
                cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
@@ -577,7 +581,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
        case 1:
        case 2:
                /* Control frame + packing (but not frame stuffing) in mode 1 */
-               ibuf[0] = (addr << 2) | (cr << 1) | EA;
+               ibuf[0] = (addr << 2) | (ocr << 1) | EA;
                ibuf[1] = control;
                ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
                /* Stuffing may double the size worst case */
@@ -611,7 +615,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
 
 static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
 {
-       gsm_send(gsm, addr, 1, control);
+       gsm_send(gsm, addr, 0, control);
 }
 
 /**
@@ -1017,25 +1021,25 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data,
  *     @tty: virtual tty bound to the DLCI
  *     @dlci: DLCI to affect
  *     @modem: modem bits (full EA)
- *     @clen: command length
+ *     @slen: number of signal octets
  *
  *     Used when a modem control message or line state inline in adaption
  *     layer 2 is processed. Sort out the local modem state and throttles
  */
 
 static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
-                                                       u32 modem, int clen)
+                                                       u32 modem, int slen)
 {
        int  mlines = 0;
        u8 brk = 0;
        int fc;
 
-       /* The modem status command can either contain one octet (v.24 signals)
-          or two octets (v.24 signals + break signals). The length field will
-          either be 2 or 3 respectively. This is specified in section
-          5.4.6.3.7 of the  27.010 mux spec. */
+       /* The modem status command can either contain one octet (V.24 signals)
+        * or two octets (V.24 signals + break signals). This is specified in
+        * section 5.4.6.3.7 of the 07.10 mux spec.
+        */
 
-       if (clen == 2)
+       if (slen == 1)
                modem = modem & 0x7f;
        else {
                brk = modem & 0x7f;
@@ -1092,6 +1096,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
        unsigned int brk = 0;
        struct gsm_dlci *dlci;
        int len = clen;
+       int slen;
        const u8 *dp = data;
        struct tty_struct *tty;
 
@@ -1111,6 +1116,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
                return;
        dlci = gsm->dlci[addr];
 
+       slen = len;
        while (gsm_read_ea(&modem, *dp++) == 0) {
                len--;
                if (len == 0)
@@ -1127,7 +1133,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
                modem |= (brk & 0x7f);
        }
        tty = tty_port_tty_get(&dlci->port);
-       gsm_process_modem(tty, dlci, modem, clen);
+       gsm_process_modem(tty, dlci, modem, slen);
        if (tty) {
                tty_wakeup(tty);
                tty_kref_put(tty);
@@ -1451,6 +1457,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci)
        if (dlci->addr != 0) {
                tty_port_tty_hangup(&dlci->port, false);
                kfifo_reset(&dlci->fifo);
+               /* Ensure that gsmtty_open() can return. */
+               tty_port_set_initialized(&dlci->port, 0);
+               wake_up_interruptible(&dlci->port.open_wait);
        } else
                dlci->gsm->dead = true;
        /* Unregister gsmtty driver,report gsmtty dev remove uevent for user */
@@ -1514,7 +1523,7 @@ static void gsm_dlci_t1(struct timer_list *t)
                        dlci->mode = DLCI_MODE_ADM;
                        gsm_dlci_open(dlci);
                } else {
-                       gsm_dlci_close(dlci);
+                       gsm_dlci_begin_close(dlci); /* prevent half open link */
                }
 
                break;
@@ -1593,6 +1602,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
        struct tty_struct *tty;
        unsigned int modem = 0;
        int len = clen;
+       int slen = 0;
 
        if (debug & 16)
                pr_debug("%d bytes for tty\n", len);
@@ -1605,12 +1615,14 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
        case 2:         /* Asynchronous serial with line state in each frame */
                while (gsm_read_ea(&modem, *data++) == 0) {
                        len--;
+                       slen++;
                        if (len == 0)
                                return;
                }
+               slen++;
                tty = tty_port_tty_get(port);
                if (tty) {
-                       gsm_process_modem(tty, dlci, modem, clen);
+                       gsm_process_modem(tty, dlci, modem, slen);
                        tty_kref_put(tty);
                }
                fallthrough;
@@ -1748,7 +1760,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
                gsm_destroy_network(dlci);
                mutex_unlock(&dlci->mutex);
 
-               tty_hangup(tty);
+               /* We cannot use tty_hangup() because in tty_kref_put() the tty
+                * driver assumes that the hangup queue is free and reuses it to
+                * queue release_one_tty() -> NULL pointer panic in
+                * process_one_work().
+                */
+               tty_vhangup(tty);
 
                tty_port_tty_set(&dlci->port, NULL);
                tty_kref_put(tty);
@@ -1800,10 +1817,10 @@ static void gsm_queue(struct gsm_mux *gsm)
                goto invalid;
 
        cr = gsm->address & 1;          /* C/R bit */
+       cr ^= gsm->initiator ? 0 : 1;   /* Flip so 1 always means command */
 
        gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
 
-       cr ^= 1 - gsm->initiator;       /* Flip so 1 always means command */
        dlci = gsm->dlci[address];
 
        switch (gsm->control) {
@@ -3234,9 +3251,9 @@ static void gsmtty_throttle(struct tty_struct *tty)
        if (dlci->state == DLCI_CLOSED)
                return;
        if (C_CRTSCTS(tty))
-               dlci->modem_tx &= ~TIOCM_DTR;
+               dlci->modem_tx &= ~TIOCM_RTS;
        dlci->throttled = true;
-       /* Send an MSC with DTR cleared */
+       /* Send an MSC with RTS cleared */
        gsmtty_modem_update(dlci, 0);
 }
 
@@ -3246,9 +3263,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
        if (dlci->state == DLCI_CLOSED)
                return;
        if (C_CRTSCTS(tty))
-               dlci->modem_tx |= TIOCM_DTR;
+               dlci->modem_tx |= TIOCM_RTS;
        dlci->throttled = false;
-       /* Send an MSC with DTR set */
+       /* Send an MSC with RTS set */
        gsmtty_modem_update(dlci, 0);
 }
 
index 8933ef1..efc7210 100644 (file)
@@ -1329,7 +1329,7 @@ handle_newline:
                        put_tty_queue(c, ldata);
                        smp_store_release(&ldata->canon_head, ldata->read_head);
                        kill_fasync(&tty->fasync, SIGIO, POLL_IN);
-                       wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
+                       wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
                        return;
                }
        }
@@ -1561,7 +1561,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
 
        if (read_cnt(ldata)) {
                kill_fasync(&tty->fasync, SIGIO, POLL_IN);
-               wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
+               wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
        }
 }
 
@@ -1926,7 +1926,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
                return false;
 
        canon_head = smp_load_acquire(&ldata->canon_head);
-       n = min(*nr + 1, canon_head - ldata->read_tail);
+       n = min(*nr, canon_head - ldata->read_tail);
 
        tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
        size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
@@ -1948,10 +1948,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
                n += N_TTY_BUF_SIZE;
        c = n + found;
 
-       if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
-               c = min(*nr, c);
+       if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
                n = c;
-       }
 
        n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
                    __func__, eol, found, n, c, tail, more);
index 673cda3..948d0a1 100644 (file)
@@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
        unsigned long address;
        int err;
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
        if (!dev->irq && (dev->id.sversion == 0xad))
                dev->irq = iosapic_serial_irq(dev);
 #endif
index 025b055..95ff10f 100644 (file)
@@ -117,7 +117,7 @@ static int pericom8250_probe(struct pci_dev *pdev, const struct pci_device_id *i
        uart.port.private_data = pericom;
        uart.port.iotype = UPIO_PORT;
        uart.port.uartclk = 921600 * 16;
-       uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ | UPF_MAGIC_MULTIPLIER;
+       uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
        uart.port.set_divisor = pericom_do_set_divisor;
        for (i = 0; i < nr && i < maxnr; i++) {
                unsigned int offset = (i == 3 && nr == 4) ? 0x38 : i * 0x8;
index 64e7e6c..38d1c07 100644 (file)
@@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
 static void sc16is7xx_tx_proc(struct kthread_work *ws)
 {
        struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
+       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
 
        if ((port->rs485.flags & SER_RS485_ENABLED) &&
            (port->rs485.delay_rts_before_send > 0))
                msleep(port->rs485.delay_rts_before_send);
 
+       mutex_lock(&s->efr_lock);
        sc16is7xx_handle_tx(port);
+       mutex_unlock(&s->efr_lock);
 }
 
 static void sc16is7xx_reconf_rs485(struct uart_port *port)
index 3639bb6..5801369 100644 (file)
@@ -599,8 +599,8 @@ static int vt_setactivate(struct vt_setactivate __user *sa)
        if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
                return -ENXIO;
 
-       vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES + 1);
        vsa.console--;
+       vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES);
        console_lock();
        ret = vc_allocate(vsa.console);
        if (ret) {
@@ -845,6 +845,7 @@ int vt_ioctl(struct tty_struct *tty,
                        return -ENXIO;
 
                arg--;
+               arg = array_index_nospec(arg, MAX_NR_CONSOLES);
                console_lock();
                ret = vc_allocate(arg);
                console_unlock();
index 8f8405b..5509d38 100644 (file)
@@ -130,6 +130,7 @@ static const struct attribute_group *ulpi_dev_attr_groups[] = {
 
 static void ulpi_dev_release(struct device *dev)
 {
+       of_node_put(dev->of_node);
        kfree(to_ulpi_dev(dev));
 }
 
@@ -247,12 +248,16 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
                return ret;
 
        ret = ulpi_read_id(ulpi);
-       if (ret)
+       if (ret) {
+               of_node_put(ulpi->dev.of_node);
                return ret;
+       }
 
        ret = device_register(&ulpi->dev);
-       if (ret)
+       if (ret) {
+               put_device(&ulpi->dev);
                return ret;
+       }
 
        dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
                ulpi->id.vendor, ulpi->id.product);
@@ -299,7 +304,6 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface);
  */
 void ulpi_unregister_interface(struct ulpi *ulpi)
 {
-       of_node_put(ulpi->dev.of_node);
        device_unregister(&ulpi->dev);
 }
 EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
index c2bbf97..d5bc36c 100644 (file)
@@ -602,11 +602,14 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
                return retval;
        }
 
-       find_and_link_peer(hub, port1);
-
        retval = component_add(&port_dev->dev, &connector_ops);
-       if (retval)
+       if (retval) {
                dev_warn(&port_dev->dev, "failed to add component\n");
+               device_unregister(&port_dev->dev);
+               return retval;
+       }
+
+       find_and_link_peer(hub, port1);
 
        /*
         * Enable runtime pm and hold a refernce that hub_configure()
index 8a63da3..88c337b 100644 (file)
@@ -1418,6 +1418,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
 void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
 int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
 #define dwc2_is_device_connected(hsotg) (hsotg->connected)
+#define dwc2_is_device_enabled(hsotg) (hsotg->enabled)
 int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
 int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup);
 int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg);
@@ -1454,6 +1455,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
                                           int testmode)
 { return 0; }
 #define dwc2_is_device_connected(hsotg) (0)
+#define dwc2_is_device_enabled(hsotg) (0)
 static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
 { return 0; }
 static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg,
index 1b39c47..d8d6493 100644 (file)
@@ -130,8 +130,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
                already = dwc2_ovr_avalid(hsotg, true);
        } else if (role == USB_ROLE_DEVICE) {
                already = dwc2_ovr_bvalid(hsotg, true);
-               /* This clear DCTL.SFTDISCON bit */
-               dwc2_hsotg_core_connect(hsotg);
+               if (dwc2_is_device_enabled(hsotg)) {
+                       /* This clear DCTL.SFTDISCON bit */
+                       dwc2_hsotg_core_connect(hsotg);
+               }
        } else {
                if (dwc2_is_device_mode(hsotg)) {
                        if (!dwc2_ovr_bvalid(hsotg, false))
index 7ff8fc8..06d0e88 100644 (file)
@@ -43,6 +43,7 @@
 #define PCI_DEVICE_ID_INTEL_ADLP               0x51ee
 #define PCI_DEVICE_ID_INTEL_ADLM               0x54ee
 #define PCI_DEVICE_ID_INTEL_ADLS               0x7ae1
+#define PCI_DEVICE_ID_INTEL_RPLS               0x7a61
 #define PCI_DEVICE_ID_INTEL_TGL                        0x9a15
 #define PCI_DEVICE_ID_AMD_MR                   0x163a
 
@@ -85,8 +86,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
 static struct gpiod_lookup_table platform_bytcr_gpios = {
        .dev_id         = "0000:00:16.0",
        .table          = {
-               GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH),
-               GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH),
                {}
        },
 };
@@ -119,6 +120,13 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
        {}
 };
 
+static const struct property_entry dwc3_pci_intel_byt_properties[] = {
+       PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+       PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+       PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+       {}
+};
+
 static const struct property_entry dwc3_pci_mrfld_properties[] = {
        PROPERTY_ENTRY_STRING("dr_mode", "otg"),
        PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
@@ -161,6 +169,10 @@ static const struct software_node dwc3_pci_intel_swnode = {
        .properties = dwc3_pci_intel_properties,
 };
 
+static const struct software_node dwc3_pci_intel_byt_swnode = {
+       .properties = dwc3_pci_intel_byt_properties,
+};
+
 static const struct software_node dwc3_pci_intel_mrfld_swnode = {
        .properties = dwc3_pci_mrfld_properties,
 };
@@ -344,7 +356,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
          (kernel_ulong_t) &dwc3_pci_intel_swnode, },
 
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT),
-         (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+         (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, },
 
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
          (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, },
@@ -409,6 +421,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
          (kernel_ulong_t) &dwc3_pci_intel_swnode, },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
+         (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
          (kernel_ulong_t) &dwc3_pci_intel_swnode, },
 
index e14ac15..a6f3a9b 100644 (file)
@@ -99,7 +99,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
        struct device           *dev = priv_data->dev;
        struct reset_control    *crst, *hibrst, *apbrst;
        struct phy              *usb3_phy;
-       int                     ret;
+       int                     ret = 0;
        u32                     reg;
 
        usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
index 520031b..a0c883f 100644 (file)
@@ -1291,6 +1291,19 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
        if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
                trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
 
+       /*
+        * As per data book 4.2.3.2TRB Control Bit Rules section
+        *
+        * The controller autonomously checks the HWO field of a TRB to determine if the
+        * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
+        * is valid before setting the HWO field to '1'. In most systems, this means that
+        * software must update the fourth DWORD of a TRB last.
+        *
+        * However there is a possibility of CPU re-ordering here which can cause
+        * controller to observe the HWO bit set prematurely.
+        * Add a write memory barrier to prevent CPU re-ordering.
+        */
+       wmb();
        trb->ctrl |= DWC3_TRB_CTRL_HWO;
 
        dwc3_ep_inc_enq(dep);
@@ -4147,9 +4160,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
        unsigned long flags;
        irqreturn_t ret = IRQ_NONE;
 
+       local_bh_disable();
        spin_lock_irqsave(&dwc->lock, flags);
        ret = dwc3_process_event_buf(evt);
        spin_unlock_irqrestore(&dwc->lock, flags);
+       local_bh_enable();
 
        return ret;
 }
index 16f9e34..9315313 100644 (file)
@@ -1988,6 +1988,9 @@ unknown:
                                if (w_index != 0x5 || (w_value >> 8))
                                        break;
                                interface = w_value & 0xFF;
+                               if (interface >= MAX_CONFIG_INTERFACES ||
+                                   !os_desc_cfg->interface[interface])
+                                       break;
                                buf[6] = w_index;
                                count = count_ext_prop(os_desc_cfg,
                                        interface);
index 25ad1e9..1922fd0 100644 (file)
@@ -1711,16 +1711,24 @@ static void ffs_data_put(struct ffs_data *ffs)
 
 static void ffs_data_closed(struct ffs_data *ffs)
 {
+       struct ffs_epfile *epfiles;
+       unsigned long flags;
+
        ENTER();
 
        if (atomic_dec_and_test(&ffs->opened)) {
                if (ffs->no_disconnect) {
                        ffs->state = FFS_DEACTIVATED;
-                       if (ffs->epfiles) {
-                               ffs_epfiles_destroy(ffs->epfiles,
-                                                  ffs->eps_count);
-                               ffs->epfiles = NULL;
-                       }
+                       spin_lock_irqsave(&ffs->eps_lock, flags);
+                       epfiles = ffs->epfiles;
+                       ffs->epfiles = NULL;
+                       spin_unlock_irqrestore(&ffs->eps_lock,
+                                                       flags);
+
+                       if (epfiles)
+                               ffs_epfiles_destroy(epfiles,
+                                                ffs->eps_count);
+
                        if (ffs->setup_state == FFS_SETUP_PENDING)
                                __ffs_ep0_stall(ffs);
                } else {
@@ -1767,14 +1775,27 @@ static struct ffs_data *ffs_data_new(const char *dev_name)
 
 static void ffs_data_clear(struct ffs_data *ffs)
 {
+       struct ffs_epfile *epfiles;
+       unsigned long flags;
+
        ENTER();
 
        ffs_closed(ffs);
 
        BUG_ON(ffs->gadget);
 
-       if (ffs->epfiles) {
-               ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+       spin_lock_irqsave(&ffs->eps_lock, flags);
+       epfiles = ffs->epfiles;
+       ffs->epfiles = NULL;
+       spin_unlock_irqrestore(&ffs->eps_lock, flags);
+
+       /*
+        * potential race possible between ffs_func_eps_disable
+        * & ffs_epfile_release therefore maintaining a local
+        * copy of epfile will save us from use-after-free.
+        */
+       if (epfiles) {
+               ffs_epfiles_destroy(epfiles, ffs->eps_count);
                ffs->epfiles = NULL;
        }
 
@@ -1922,12 +1943,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
 
 static void ffs_func_eps_disable(struct ffs_function *func)
 {
-       struct ffs_ep *ep         = func->eps;
-       struct ffs_epfile *epfile = func->ffs->epfiles;
-       unsigned count            = func->ffs->eps_count;
+       struct ffs_ep *ep;
+       struct ffs_epfile *epfile;
+       unsigned short count;
        unsigned long flags;
 
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
+       count = func->ffs->eps_count;
+       epfile = func->ffs->epfiles;
+       ep = func->eps;
        while (count--) {
                /* pending requests get nuked */
                if (ep->ep)
@@ -1945,14 +1969,18 @@ static void ffs_func_eps_disable(struct ffs_function *func)
 
 static int ffs_func_eps_enable(struct ffs_function *func)
 {
-       struct ffs_data *ffs      = func->ffs;
-       struct ffs_ep *ep         = func->eps;
-       struct ffs_epfile *epfile = ffs->epfiles;
-       unsigned count            = ffs->eps_count;
+       struct ffs_data *ffs;
+       struct ffs_ep *ep;
+       struct ffs_epfile *epfile;
+       unsigned short count;
        unsigned long flags;
        int ret = 0;
 
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
+       ffs = func->ffs;
+       ep = func->eps;
+       epfile = ffs->epfiles;
+       count = ffs->eps_count;
        while(count--) {
                ep->ep->driver_data = ep;
 
index 36fa6ef..097a709 100644 (file)
@@ -203,7 +203,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
 
        .bDescriptorSubtype = UAC_INPUT_TERMINAL,
        /* .bTerminalID = DYNAMIC */
-       .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED),
+       .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
        .bAssocTerminal = 0,
        /* .bCSourceID = DYNAMIC */
        .iChannelNames = 0,
@@ -231,7 +231,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
 
        .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
        /* .bTerminalID = DYNAMIC */
-       .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED),
+       .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
        .bAssocTerminal = 0,
        /* .bSourceID = DYNAMIC */
        /* .bCSourceID = DYNAMIC */
index 431d5a7..00b3f6b 100644 (file)
@@ -637,14 +637,17 @@ static int rndis_set_response(struct rndis_params *params,
        rndis_set_cmplt_type *resp;
        rndis_resp_t *r;
 
+       BufLength = le32_to_cpu(buf->InformationBufferLength);
+       BufOffset = le32_to_cpu(buf->InformationBufferOffset);
+       if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
+           (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
+                   return -EINVAL;
+
        r = rndis_add_response(params, sizeof(rndis_set_cmplt_type));
        if (!r)
                return -ENOMEM;
        resp = (rndis_set_cmplt_type *)r->buf;
 
-       BufLength = le32_to_cpu(buf->InformationBufferLength);
-       BufOffset = le32_to_cpu(buf->InformationBufferOffset);
-
 #ifdef VERBOSE_DEBUG
        pr_debug("%s: Length: %d\n", __func__, BufLength);
        pr_debug("%s: Offset: %d\n", __func__, BufOffset);
@@ -919,6 +922,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
        params->resp_avail = resp_avail;
        params->v = v;
        INIT_LIST_HEAD(&params->resp_queue);
+       spin_lock_init(&params->resp_lock);
        pr_debug("%s: configNr = %d\n", __func__, i);
 
        return params;
@@ -1012,12 +1016,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf)
 {
        rndis_resp_t *r, *n;
 
+       spin_lock(&params->resp_lock);
        list_for_each_entry_safe(r, n, &params->resp_queue, list) {
                if (r->buf == buf) {
                        list_del(&r->list);
                        kfree(r);
                }
        }
+       spin_unlock(&params->resp_lock);
 }
 EXPORT_SYMBOL_GPL(rndis_free_response);
 
@@ -1027,14 +1033,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
 
        if (!length) return NULL;
 
+       spin_lock(&params->resp_lock);
        list_for_each_entry_safe(r, n, &params->resp_queue, list) {
                if (!r->send) {
                        r->send = 1;
                        *length = r->length;
+                       spin_unlock(&params->resp_lock);
                        return r->buf;
                }
        }
 
+       spin_unlock(&params->resp_lock);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(rndis_get_next_response);
@@ -1051,7 +1060,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
        r->length = length;
        r->send = 0;
 
+       spin_lock(&params->resp_lock);
        list_add_tail(&r->list, &params->resp_queue);
+       spin_unlock(&params->resp_lock);
        return r;
 }
 
index f6167f7..6206b8b 100644 (file)
@@ -174,6 +174,7 @@ typedef struct rndis_params {
        void                    (*resp_avail)(void *v);
        void                    *v;
        struct list_head        resp_queue;
+       spinlock_t              resp_lock;
 } rndis_params;
 
 /* RNDIS Message parser and other useless functions */
index c5a2c73..d86c3a3 100644 (file)
@@ -1004,7 +1004,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
                ret = -EBUSY;
                goto out_unlock;
        }
-       if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) {
+       if (in != usb_endpoint_dir_in(ep->ep->desc)) {
                dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
                ret = -EINVAL;
                goto out_unlock;
index 57d417a..601829a 100644 (file)
@@ -2378,6 +2378,8 @@ static void handle_ext_role_switch_states(struct device *dev,
        switch (role) {
        case USB_ROLE_NONE:
                usb3->connection_state = USB_ROLE_NONE;
+               if (cur_role == USB_ROLE_HOST)
+                       device_release_driver(host);
                if (usb3->driver)
                        usb3_disconnect(usb3);
                usb3_vbus_out(usb3, false);
index 6ce886f..2907fad 100644 (file)
@@ -1615,6 +1615,8 @@ static void xudc_getstatus(struct xusb_udc *udc)
                break;
        case USB_RECIP_ENDPOINT:
                epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+               if (epnum >= XUSB_MAX_ENDPOINTS)
+                       goto stall;
                target_ep = &udc->ep[epnum];
                epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
                halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
@@ -1682,6 +1684,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
        case USB_RECIP_ENDPOINT:
                if (!udc->setup.wValue) {
                        endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+                       if (endpoint >= XUSB_MAX_ENDPOINTS) {
+                               xudc_ep0_stall(udc);
+                               return;
+                       }
                        target_ep = &udc->ep[endpoint];
                        outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK;
                        outinbit = outinbit >> 7;
index dc357ca..2d37854 100644 (file)
@@ -1091,6 +1091,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        int                     retval = 0;
        bool                    comp_timer_running = false;
        bool                    pending_portevent = false;
+       bool                    reinit_xhc = false;
 
        if (!hcd->state)
                return 0;
@@ -1107,10 +1108,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
 
        spin_lock_irq(&xhci->lock);
-       if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
-               hibernated = true;
 
-       if (!hibernated) {
+       if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
+               reinit_xhc = true;
+
+       if (!reinit_xhc) {
                /*
                 * Some controllers might lose power during suspend, so wait
                 * for controller not ready bit to clear, just as in xHC init.
@@ -1143,12 +1145,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
-               temp = readl(&xhci->op_regs->status);
        }
 
-       /* If restore operation fails, re-initialize the HC during resume */
-       if ((temp & STS_SRE) || hibernated) {
+       temp = readl(&xhci->op_regs->status);
 
+       /* re-initialize the HC on Restore Error, or Host Controller Error */
+       if (temp & (STS_SRE | STS_HCE)) {
+               reinit_xhc = true;
+               xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+       }
+
+       if (reinit_xhc) {
                if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                                !(xhci_all_ports_seen_u0(xhci))) {
                        del_timer_sync(&xhci->comp_mode_recovery_timer);
@@ -1604,9 +1611,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
        struct urb_priv *urb_priv;
        int num_tds;
 
-       if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
-                                       true, true, __func__) <= 0)
+       if (!urb)
                return -EINVAL;
+       ret = xhci_check_args(hcd, urb->dev, urb->ep,
+                                       true, true, __func__);
+       if (ret <= 0)
+               return ret ? ret : -EINVAL;
 
        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
@@ -3323,7 +3333,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
                return -EINVAL;
        ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
        if (ret <= 0)
-               return -EINVAL;
+               return ret ? ret : -EINVAL;
        if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
                xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
                                " descriptor for ep 0x%x does not support streams\n",
index 507deef..04c4e3f 100644 (file)
@@ -543,6 +543,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
        if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
                hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
 
+       if (of_property_read_u8(np, "boost-up", &hub->boost_up))
+               hub->boost_up = USB251XB_DEF_BOOST_UP;
+
        cproperty_char = of_get_property(np, "manufacturer", NULL);
        strlcpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING,
                sizeof(str));
@@ -584,7 +587,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
         * may be as soon as needed.
         */
        hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
-       hub->boost_up = USB251XB_DEF_BOOST_UP;
        hub->boost_57 = USB251XB_DEF_BOOST_57;
        hub->boost_14 = USB251XB_DEF_BOOST_14;
        hub->port_map12 = USB251XB_DEF_PORT_MAP_12;
index 29f4b87..2798fca 100644 (file)
 #define CH341_QUIRK_SIMULATE_BREAK     BIT(1)
 
 static const struct usb_device_id id_table[] = {
-       { USB_DEVICE(0x1a86, 0x5512) },
        { USB_DEVICE(0x1a86, 0x5523) },
        { USB_DEVICE(0x1a86, 0x7522) },
        { USB_DEVICE(0x1a86, 0x7523) },
+       { USB_DEVICE(0x2184, 0x0057) },
        { USB_DEVICE(0x4348, 0x5523) },
        { USB_DEVICE(0x9986, 0x7523) },
        { },
index 8a60c0d..a27f7ef 100644 (file)
@@ -51,6 +51,7 @@ static void cp210x_enable_event_mode(struct usb_serial_port *port);
 static void cp210x_disable_event_mode(struct usb_serial_port *port);
 
 static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */
        { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
        { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
        { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
@@ -68,6 +69,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
        { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
        { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */
+       { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */
        { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
        { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
        { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
index 4edebd1..49c08f0 100644 (file)
@@ -969,6 +969,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
+       { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
@@ -977,12 +978,14 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
+       { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
+       { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
index 755858c..d1a9564 100644 (file)
 #define BRAINBOXES_VX_023_PID          0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
 #define BRAINBOXES_VX_034_PID          0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
 #define BRAINBOXES_US_101_PID          0x1011 /* US-101 1xRS232 */
+#define BRAINBOXES_US_159_PID          0x1021 /* US-159 1xRS232 */
+#define BRAINBOXES_US_235_PID          0x1017 /* US-235 1xRS232 */
+#define BRAINBOXES_US_320_PID          0x1019 /* US-320 1xRS422/485 */
 #define BRAINBOXES_US_324_PID          0x1013 /* US-324 1xRS422/485 1Mbaud */
 #define BRAINBOXES_US_606_1_PID                0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
 #define BRAINBOXES_US_606_2_PID                0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
index 42420bf..e7755d9 100644 (file)
@@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb);
 
 #define DELL_PRODUCT_5821E                     0x81d7
 #define DELL_PRODUCT_5821E_ESIM                        0x81e0
+#define DELL_PRODUCT_5829E_ESIM                        0x81e4
+#define DELL_PRODUCT_5829E                     0x81e6
 
 #define KYOCERA_VENDOR_ID                      0x0c88
 #define KYOCERA_PRODUCT_KPC650                 0x17da
@@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
          .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+       { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E),
+         .driver_info = RSVD(0) | RSVD(6) },
+       { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
+         .driver_info = RSVD(0) | RSVD(6) },
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },   /* ADU-E100, ADU-310 */
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(2) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff),    /* Telit LE910-S1 (ECM) */
          .driver_info = NCTRL(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff),    /* Telit LE910R1 (RNDIS) */
+         .driver_info = NCTRL(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff),    /* Telit LE910R1 (ECM) */
+         .driver_info = NCTRL(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, 0x9010),                          /* Telit SBL FN980 flashing device */
          .driver_info = NCTRL(0) | ZLP },
        { USB_DEVICE(TELIT_VENDOR_ID, 0x9200),                          /* Telit LE910S1 flashing device */
          .driver_info = NCTRL(0) | ZLP },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x9201),                          /* Telit LE910R1 flashing device */
+         .driver_info = NCTRL(0) | ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) },
@@ -1649,6 +1661,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(2) },
        { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },    /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff),  /* ZTE MF286D */
+         .driver_info = RSVD(5) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
index 6d27a5b..7ffcda9 100644 (file)
@@ -761,12 +761,12 @@ static int tps6598x_probe(struct i2c_client *client)
 
        ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
        if (ret < 0)
-               return ret;
+               goto err_clear_mask;
        trace_tps6598x_status(status);
 
        ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
        if (ret < 0)
-               return ret;
+               goto err_clear_mask;
 
        /*
         * This fwnode has a "compatible" property, but is never populated as a
@@ -855,7 +855,8 @@ err_role_put:
        usb_role_switch_put(tps->role_sw);
 err_fwnode_put:
        fwnode_handle_put(fwnode);
-
+err_clear_mask:
+       tps6598x_write64(tps, TPS_REG_INT_MASK1, 0);
        return ret;
 }
 
index f648f1c..d0f9107 100644 (file)
@@ -1563,11 +1563,27 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 
        switch (cmd) {
        case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
+               /* This mq feature check aligns with pre-existing userspace
+                * implementation.
+                *
+                * Without it, an untrusted driver could fake a multiqueue config
+                * request down to a non-mq device that may cause the kernel to
+                * panic due to uninitialized resources for extra vqs. Even with
+                * a well behaving guest driver, it is not expected to allow
+                * changing the number of vqs on a non-mq device.
+                */
+               if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ))
+                       break;
+
                read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq));
                if (read != sizeof(mq))
                        break;
 
                newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
+               if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+                   newqps > mlx5_vdpa_max_qps(mvdev->max_vqs))
+                       break;
+
                if (ndev->cur_num_vqs == 2 * newqps) {
                        status = VIRTIO_NET_OK;
                        break;
@@ -1897,11 +1913,25 @@ static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev)
        return ndev->mvdev.mlx_features;
 }
 
-static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
+static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
 {
+       /* Minimum features to expect */
        if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
                return -EOPNOTSUPP;
 
+       /* Double check features combination sent down by the driver.
+        * Fail invalid features due to absence of the depended-on feature.
+        *
+        * Per VIRTIO v1.1 specification, section 5.1.3.1 Feature bit
+        * requirements: "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ".
+        * By failing the invalid features sent down by untrusted drivers,
+        * we're assured the assumption made upon is_index_valid() and
+        * is_ctrl_vq_idx() will not be compromised.
+        */
+       if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) ==
+            BIT_ULL(VIRTIO_NET_F_MQ))
+               return -EINVAL;
+
        return 0;
 }
 
@@ -1977,7 +2007,7 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
 
        print_features(mvdev, features, true);
 
-       err = verify_min_features(mvdev, features);
+       err = verify_driver_features(mvdev, features);
        if (err)
                return err;
 
index 9846c9d..1ea5254 100644 (file)
@@ -393,7 +393,7 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
         * If it does happen we assume a legacy guest.
         */
        if (!vdev->features_valid)
-               vdpa_set_features(vdev, 0, true);
+               vdpa_set_features_unlocked(vdev, 0);
        ops->get_config(vdev, offset, buf, len);
 }
 
index 2b1143f..0a4d93e 100644 (file)
@@ -294,7 +294,7 @@ vduse_domain_alloc_iova(struct iova_domain *iovad,
 
        iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
 
-       return iova_pfn << shift;
+       return (dma_addr_t)iova_pfn << shift;
 }
 
 static void vduse_domain_free_iova(struct iova_domain *iovad,
index a57e381..cce101e 100644 (file)
@@ -533,8 +533,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
 {
        struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
 
-       vdpa_unregister_device(&vp_vdpa->vdpa);
        vp_modern_remove(&vp_vdpa->mdev);
+       vdpa_unregister_device(&vp_vdpa->vdpa);
 }
 
 static struct pci_driver vp_vdpa_driver = {
index 670d56c..40b0983 100644 (file)
@@ -57,6 +57,17 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
        if (last < start)
                return -EFAULT;
 
+       /* If the range being mapped is [0, ULONG_MAX], split it into two entries
+        * otherwise its size would overflow u64.
+        */
+       if (start == 0 && last == ULONG_MAX) {
+               u64 mid = last / 2;
+
+               vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque);
+               addr += mid + 1;
+               start = mid + 1;
+       }
+
        if (iotlb->limit &&
            iotlb->nmaps == iotlb->limit &&
            iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) {
index 8515398..ec5249e 100644 (file)
@@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
        if (copy_from_user(&features, featurep, sizeof(features)))
                return -EFAULT;
 
-       if (vdpa_set_features(vdpa, features, false))
+       if (vdpa_set_features(vdpa, features))
                return -EINVAL;
 
        return 0;
index 59edb5a..082380c 100644 (file)
@@ -1170,6 +1170,11 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
                goto done;
        }
 
+       if (msg.size == 0) {
+               ret = -EINVAL;
+               goto done;
+       }
+
        if (dev->msg_handler)
                ret = dev->msg_handler(dev, &msg);
        else
@@ -1981,7 +1986,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
        return 0;
 }
 
-static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
+static int vhost_update_avail_event(struct vhost_virtqueue *vq)
 {
        if (vhost_put_avail_event(vq))
                return -EFAULT;
@@ -2527,7 +2532,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
                        return false;
                }
        } else {
-               r = vhost_update_avail_event(vq, vq->avail_idx);
+               r = vhost_update_avail_event(vq);
                if (r) {
                        vq_err(vq, "Failed to update avail event index at %p: %d\n",
                               vhost_avail_event(vq), r);
index d6ca1c7..37f0b42 100644 (file)
@@ -629,16 +629,18 @@ err:
        return ret;
 }
 
-static int vhost_vsock_stop(struct vhost_vsock *vsock)
+static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
 {
        size_t i;
-       int ret;
+       int ret = 0;
 
        mutex_lock(&vsock->dev.mutex);
 
-       ret = vhost_dev_check_owner(&vsock->dev);
-       if (ret)
-               goto err;
+       if (check_owner) {
+               ret = vhost_dev_check_owner(&vsock->dev);
+               if (ret)
+                       goto err;
+       }
 
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                struct vhost_virtqueue *vq = &vsock->vqs[i];
@@ -753,7 +755,12 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
         * inefficient.  Room for improvement here. */
        vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
 
-       vhost_vsock_stop(vsock);
+       /* Don't check the owner, because we are in the release path, so we
+        * need to stop the vsock device in any case.
+        * vhost_vsock_stop() can not fail in this case, so we don't need to
+        * check the return code.
+        */
+       vhost_vsock_stop(vsock, false);
        vhost_vsock_flush(vsock);
        vhost_dev_stop(&vsock->dev);
 
@@ -868,7 +875,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                if (start)
                        return vhost_vsock_start(vsock);
                else
-                       return vhost_vsock_stop(vsock);
+                       return vhost_vsock_stop(vsock, true);
        case VHOST_GET_FEATURES:
                features = VHOST_VSOCK_FEATURES;
                if (copy_to_user(argp, &features, sizeof(features)))
index f36829e..2fc1b80 100644 (file)
@@ -1025,7 +1025,7 @@ static void fbcon_init(struct vc_data *vc, int init)
        struct vc_data *svc = *default_mode;
        struct fbcon_display *t, *p = &fb_display[vc->vc_num];
        int logo = 1, new_rows, new_cols, rows, cols;
-       int cap, ret;
+       int ret;
 
        if (WARN_ON(info_idx == -1))
            return;
@@ -1034,7 +1034,6 @@ static void fbcon_init(struct vc_data *vc, int init)
                con2fb_map[vc->vc_num] = info_idx;
 
        info = registered_fb[con2fb_map[vc->vc_num]];
-       cap = info->flags;
 
        if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
                logo_shown = FBCON_LOGO_DONTSHOW;
@@ -1137,8 +1136,8 @@ static void fbcon_init(struct vc_data *vc, int init)
        ops->graphics = 0;
 
 #ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
-       if ((cap & FBINFO_HWACCEL_COPYAREA) &&
-           !(cap & FBINFO_HWACCEL_DISABLED))
+       if ((info->flags & FBINFO_HWACCEL_COPYAREA) &&
+           !(info->flags & FBINFO_HWACCEL_DISABLED))
                p->scrollmode = SCROLL_MOVE;
        else /* default to something safe */
                p->scrollmode = SCROLL_REDRAW;
index 0fa7ede..13083ad 100644 (file)
@@ -1160,6 +1160,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
                ret = fbcon_set_con2fb_map_ioctl(argp);
                break;
        case FBIOBLANK:
+               if (arg > FB_BLANK_POWERDOWN)
+                       return -EINVAL;
                console_lock();
                lock_fb_info(info);
                ret = fb_blank(info, arg);
index 34f80b7..492fc26 100644 (file)
@@ -105,7 +105,6 @@ config VIRTIO_BALLOON
 
 config VIRTIO_MEM
        tristate "Virtio mem driver"
-       default m
        depends on X86_64
        depends on VIRTIO
        depends on MEMORY_HOTPLUG
index 00ac9db..22f15f4 100644 (file)
@@ -166,14 +166,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status)
 }
 EXPORT_SYMBOL_GPL(virtio_add_status);
 
-int virtio_finalize_features(struct virtio_device *dev)
+/* Do some validation, then set FEATURES_OK */
+static int virtio_features_ok(struct virtio_device *dev)
 {
-       int ret = dev->config->finalize_features(dev);
        unsigned status;
+       int ret;
 
        might_sleep();
-       if (ret)
-               return ret;
 
        ret = arch_has_restricted_virtio_memory_access();
        if (ret) {
@@ -202,8 +201,23 @@ int virtio_finalize_features(struct virtio_device *dev)
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(virtio_finalize_features);
 
+/**
+ * virtio_reset_device - quiesce device for removal
+ * @dev: the device to reset
+ *
+ * Prevents device from sending interrupts and accessing memory.
+ *
+ * Generally used for cleanup during driver / device removal.
+ *
+ * Once this has been invoked, caller must ensure that
+ * virtqueue_notify / virtqueue_kick are not in progress.
+ *
+ * Note: this guarantees that vq callbacks are not in progress, however caller
+ * is responsible for preventing access from other contexts, such as a system
+ * call/workqueue/bh.  Invoking virtio_break_device then flushing any such
+ * contexts is one way to handle that.
+ * */
 void virtio_reset_device(struct virtio_device *dev)
 {
        dev->config->reset(dev);
@@ -245,17 +259,6 @@ static int virtio_dev_probe(struct device *_d)
                driver_features_legacy = driver_features;
        }
 
-       /*
-        * Some devices detect legacy solely via F_VERSION_1. Write
-        * F_VERSION_1 to force LE config space accesses before FEATURES_OK for
-        * these when needed.
-        */
-       if (drv->validate && !virtio_legacy_is_little_endian()
-                         && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) {
-               dev->features = BIT_ULL(VIRTIO_F_VERSION_1);
-               dev->config->finalize_features(dev);
-       }
-
        if (device_features & (1ULL << VIRTIO_F_VERSION_1))
                dev->features = driver_features & device_features;
        else
@@ -266,13 +269,26 @@ static int virtio_dev_probe(struct device *_d)
                if (device_features & (1ULL << i))
                        __virtio_set_bit(dev, i);
 
+       err = dev->config->finalize_features(dev);
+       if (err)
+               goto err;
+
        if (drv->validate) {
+               u64 features = dev->features;
+
                err = drv->validate(dev);
                if (err)
                        goto err;
+
+               /* Did validation change any features? Then write them again. */
+               if (features != dev->features) {
+                       err = dev->config->finalize_features(dev);
+                       if (err)
+                               goto err;
+               }
        }
 
-       err = virtio_finalize_features(dev);
+       err = virtio_features_ok(dev);
        if (err)
                goto err;
 
@@ -496,7 +512,11 @@ int virtio_device_restore(struct virtio_device *dev)
        /* We have a driver! */
        virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
 
-       ret = virtio_finalize_features(dev);
+       ret = dev->config->finalize_features(dev);
+       if (ret)
+               goto err;
+
+       ret = virtio_features_ok(dev);
        if (ret)
                goto err;
 
index 7767a7f..7650455 100644 (file)
@@ -317,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);
 
-       return vdpa_set_features(vdpa, vdev->features, false);
+       return vdpa_set_features(vdpa, vdev->features);
 }
 
 static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
index 2c890f4..72d4e3f 100644 (file)
@@ -264,7 +264,7 @@ struct xen_device_domain_owner {
 };
 
 static DEFINE_SPINLOCK(dev_domain_list_spinlock);
-static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
+static LIST_HEAD(dev_domain_list);
 
 static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
 {
index 7a2b11c..6c7dc13 100644 (file)
@@ -369,8 +369,8 @@ source "fs/ksmbd/Kconfig"
 
 config SMBFS_COMMON
        tristate
-       default y if CIFS=y
-       default m if CIFS=m
+       default y if CIFS=y || SMB_SERVER=y
+       default m if CIFS=m || SMB_SERVER=m
 
 source "fs/coda/Kconfig"
 source "fs/afs/Kconfig"
index 605017e..d61543f 100644 (file)
@@ -1117,7 +1117,7 @@ out_free_interp:
                         * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
                         */
                        alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
-                       if (alignment > ELF_MIN_ALIGN) {
+                       if (interpreter || alignment > ELF_MIN_ALIGN) {
                                load_bias = ELF_ET_DYN_BASE;
                                if (current->flags & PF_RANDOMIZE)
                                        load_bias += arch_mmap_rnd();
@@ -1135,14 +1135,25 @@ out_free_interp:
                         * is then page aligned.
                         */
                        load_bias = ELF_PAGESTART(load_bias - vaddr);
-               }
 
-               /*
-                * Calculate the entire size of the ELF mapping (total_size).
-                * (Note that load_addr_set is set to true later once the
-                * initial mapping is performed.)
-                */
-               if (!load_addr_set) {
+                       /*
+                        * Calculate the entire size of the ELF mapping
+                        * (total_size), used for the initial mapping,
+                        * due to load_addr_set which is set to true later
+                        * once the initial mapping is performed.
+                        *
+                        * Note that this is only sensible when the LOAD
+                        * segments are contiguous (or overlapping). If
+                        * used for LOADs that are far apart, this would
+                        * cause the holes between LOADs to be mapped,
+                        * running the risk of having the mapping fail,
+                        * as it would be larger than the ELF file itself.
+                        *
+                        * As a result, only ET_DYN does this, since
+                        * some ET_EXEC (e.g. ia64) may have large virtual
+                        * memory holes between LOADs.
+                        *
+                        */
                        total_size = total_mapping_size(elf_phdata,
                                                        elf_ex->e_phnum);
                        if (!total_size) {
index c07f357..e1eae7e 100644 (file)
@@ -817,20 +817,16 @@ static struct file_system_type bm_fs_type = {
 };
 MODULE_ALIAS_FS("binfmt_misc");
 
-static struct ctl_table_header *binfmt_misc_header;
-
 static int __init init_misc_binfmt(void)
 {
        int err = register_filesystem(&bm_fs_type);
        if (!err)
                insert_binfmt(&misc_format);
-       binfmt_misc_header = register_sysctl_mount_point("fs/binfmt_misc");
-       return 0;
+       return err;
 }
 
 static void __exit exit_misc_binfmt(void)
 {
-       unregister_sysctl_table(binfmt_misc_header);
        unregister_binfmt(&misc_format);
        unregister_filesystem(&bm_fs_type);
 }
index 8992e00..ebb2d10 100644 (file)
@@ -602,6 +602,9 @@ enum {
        /* Indicate that we want the transaction kthread to commit right now. */
        BTRFS_FS_COMMIT_TRANS,
 
+       /* Indicate we have half completed snapshot deletions pending. */
+       BTRFS_FS_UNFINISHED_DROPS,
+
 #if BITS_PER_LONG == 32
        /* Indicate if we have error/warn message printed on 32bit systems */
        BTRFS_FS_32BIT_ERROR,
@@ -1106,8 +1109,15 @@ enum {
        BTRFS_ROOT_QGROUP_FLUSHING,
        /* We started the orphan cleanup for this root. */
        BTRFS_ROOT_ORPHAN_CLEANUP,
+       /* This root has a drop operation that was started previously. */
+       BTRFS_ROOT_UNFINISHED_DROP,
 };
 
+static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
+{
+       clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
+}
+
 /*
  * Record swapped tree blocks of a subvolume tree for delayed subtree trace
  * code. For detail check comment in fs/btrfs/qgroup.c.
@@ -3291,7 +3301,7 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
 int __init btrfs_auto_defrag_init(void);
 void __cold btrfs_auto_defrag_exit(void);
 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
-                          struct btrfs_inode *inode);
+                          struct btrfs_inode *inode, u32 extent_thresh);
 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
index 87a5add..48590a3 100644 (file)
@@ -3813,6 +3813,10 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 
        set_bit(BTRFS_FS_OPEN, &fs_info->flags);
 
+       /* Kick the cleaner thread so it'll start deleting snapshots. */
+       if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
+               wake_up_process(fs_info->cleaner_kthread);
+
 clear_oneshot:
        btrfs_clear_oneshot_options(fs_info);
        return 0;
@@ -4538,6 +4542,12 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
         */
        kthread_park(fs_info->cleaner_kthread);
 
+       /*
+        * If we had UNFINISHED_DROPS we could still be processing them, so
+        * clear that bit and wake up relocation so it can stop.
+        */
+       btrfs_wake_unfinished_drop(fs_info);
+
        /* wait for the qgroup rescan worker to stop */
        btrfs_qgroup_wait_for_completion(fs_info, false);
 
index d89273c..96427b1 100644 (file)
@@ -5622,6 +5622,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
        int ret;
        int level;
        bool root_dropped = false;
+       bool unfinished_drop = false;
 
        btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);
 
@@ -5664,6 +5665,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
         * already dropped.
         */
        set_bit(BTRFS_ROOT_DELETING, &root->state);
+       unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);
+
        if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
                level = btrfs_header_level(root->node);
                path->nodes[level] = btrfs_lock_root_node(root);
@@ -5838,6 +5841,13 @@ out_free:
        kfree(wc);
        btrfs_free_path(path);
 out:
+       /*
+        * We were an unfinished drop root, check to see if there are any
+        * pending, and if not clear and wake up any waiters.
+        */
+       if (!err && unfinished_drop)
+               btrfs_maybe_wake_unfinished_drop(fs_info);
+
        /*
         * So if we need to stop dropping the snapshot for whatever reason we
         * need to make sure to add it back to the dead root list so that we
index 409bad3..4c91060 100644 (file)
@@ -6841,14 +6841,24 @@ static void assert_eb_page_uptodate(const struct extent_buffer *eb,
 {
        struct btrfs_fs_info *fs_info = eb->fs_info;
 
+       /*
+        * If we are using the commit root we could potentially clear a page
+        * Uptodate while we're using the extent buffer that we've previously
+        * looked up.  We don't want to complain in this case, as the page was
+        * valid before, we just didn't write it out.  Instead we want to catch
+        * the case where we didn't actually read the block properly, which
+        * would have !PageUptodate && !PageError, as we clear PageError before
+        * reading.
+        */
        if (fs_info->sectorsize < PAGE_SIZE) {
-               bool uptodate;
+               bool uptodate, error;
 
                uptodate = btrfs_subpage_test_uptodate(fs_info, page,
                                                       eb->start, eb->len);
-               WARN_ON(!uptodate);
+               error = btrfs_subpage_test_error(fs_info, page, eb->start, eb->len);
+               WARN_ON(!uptodate && !error);
        } else {
-               WARN_ON(!PageUptodate(page));
+               WARN_ON(!PageUptodate(page) && !PageError(page));
        }
 }
 
index 5a36add..c28cedd 100644 (file)
@@ -261,6 +261,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
                        em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
                        em->mod_start = merge->mod_start;
                        em->generation = max(em->generation, merge->generation);
+                       set_bit(EXTENT_FLAG_MERGED, &em->flags);
 
                        rb_erase_cached(&merge->rb_node, &tree->map);
                        RB_CLEAR_NODE(&merge->rb_node);
@@ -278,6 +279,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
                RB_CLEAR_NODE(&merge->rb_node);
                em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
                em->generation = max(em->generation, merge->generation);
+               set_bit(EXTENT_FLAG_MERGED, &em->flags);
                free_extent_map(merge);
        }
 }
index 8e21733..d2fa32f 100644 (file)
@@ -25,6 +25,8 @@ enum {
        EXTENT_FLAG_FILLING,
        /* filesystem extent mapping type */
        EXTENT_FLAG_FS_MAPPING,
+       /* This em is merged from two or more physically adjacent ems */
+       EXTENT_FLAG_MERGED,
 };
 
 struct extent_map {
@@ -40,6 +42,12 @@ struct extent_map {
        u64 ram_bytes;
        u64 block_start;
        u64 block_len;
+
+       /*
+        * Generation of the extent map, for merged em it's the highest
+        * generation of all merged ems.
+        * For non-merged extents, it's from btrfs_file_extent_item::generation.
+        */
        u64 generation;
        unsigned long flags;
        /* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */
index 11204db..a0179cc 100644 (file)
@@ -50,11 +50,14 @@ struct inode_defrag {
        /* root objectid */
        u64 root;
 
-       /* last offset we were able to defrag */
-       u64 last_offset;
-
-       /* if we've wrapped around back to zero once already */
-       int cycled;
+       /*
+        * The extent size threshold for autodefrag.
+        *
+        * This value is different for compressed/non-compressed extents,
+        * thus needs to be passed from higher layer.
+        * (aka, inode_should_defrag())
+        */
+       u32 extent_thresh;
 };
 
 static int __compare_inode_defrag(struct inode_defrag *defrag1,
@@ -107,8 +110,8 @@ static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
-                       if (defrag->last_offset > entry->last_offset)
-                               entry->last_offset = defrag->last_offset;
+                       entry->extent_thresh = min(defrag->extent_thresh,
+                                                  entry->extent_thresh);
                        return -EEXIST;
                }
        }
@@ -134,7 +137,7 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
  * enabled
  */
 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
-                          struct btrfs_inode *inode)
+                          struct btrfs_inode *inode, u32 extent_thresh)
 {
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -160,6 +163,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;
+       defrag->extent_thresh = extent_thresh;
 
        spin_lock(&fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
@@ -178,34 +182,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
        return 0;
 }
 
-/*
- * Requeue the defrag object. If there is a defrag object that points to
- * the same inode in the tree, we will merge them together (by
- * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
- */
-static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
-                                      struct inode_defrag *defrag)
-{
-       struct btrfs_fs_info *fs_info = inode->root->fs_info;
-       int ret;
-
-       if (!__need_auto_defrag(fs_info))
-               goto out;
-
-       /*
-        * Here we don't check the IN_DEFRAG flag, because we need merge
-        * them together.
-        */
-       spin_lock(&fs_info->defrag_inodes_lock);
-       ret = __btrfs_add_inode_defrag(inode, defrag);
-       spin_unlock(&fs_info->defrag_inodes_lock);
-       if (ret)
-               goto out;
-       return;
-out:
-       kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-}
-
 /*
  * pick the defragable inode that we want, if it doesn't exist, we will get
  * the next one.
@@ -278,8 +254,14 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct btrfs_ioctl_defrag_range_args range;
-       int num_defrag;
-       int ret;
+       int ret = 0;
+       u64 cur = 0;
+
+again:
+       if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
+               goto cleanup;
+       if (!__need_auto_defrag(fs_info))
+               goto cleanup;
 
        /* get the inode */
        inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
@@ -295,39 +277,30 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
                goto cleanup;
        }
 
+       if (cur >= i_size_read(inode)) {
+               iput(inode);
+               goto cleanup;
+       }
+
        /* do a chunk of defrag */
        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;
-       range.start = defrag->last_offset;
+       range.start = cur;
+       range.extent_thresh = defrag->extent_thresh;
 
        sb_start_write(fs_info->sb);
-       num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+       ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                       BTRFS_DEFRAG_BATCH);
        sb_end_write(fs_info->sb);
-       /*
-        * if we filled the whole defrag batch, there
-        * must be more work to do.  Queue this defrag
-        * again
-        */
-       if (num_defrag == BTRFS_DEFRAG_BATCH) {
-               defrag->last_offset = range.start;
-               btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
-       } else if (defrag->last_offset && !defrag->cycled) {
-               /*
-                * we didn't fill our defrag batch, but
-                * we didn't start at zero.  Make sure we loop
-                * around to the start of the file.
-                */
-               defrag->last_offset = 0;
-               defrag->cycled = 1;
-               btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
-       } else {
-               kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-       }
-
        iput(inode);
-       return 0;
+
+       if (ret < 0)
+               goto cleanup;
+
+       cur = max(cur + fs_info->sectorsize, range.start);
+       goto again;
+
 cleanup:
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        return ret;
index 3b2403b..5bbea5e 100644 (file)
@@ -560,12 +560,12 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
 }
 
 static inline void inode_should_defrag(struct btrfs_inode *inode,
-               u64 start, u64 end, u64 num_bytes, u64 small_write)
+               u64 start, u64 end, u64 num_bytes, u32 small_write)
 {
        /* If this is a small write inside eof, kick off a defrag */
        if (num_bytes < small_write &&
            (start > 0 || end + 1 < inode->disk_i_size))
-               btrfs_add_inode_defrag(NULL, inode);
+               btrfs_add_inode_defrag(NULL, inode, small_write);
 }
 
 /*
@@ -7600,6 +7600,34 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
        }
 
        len = min(len, em->len - (start - em->start));
+
+       /*
+        * If we have a NOWAIT request and the range contains multiple extents
+        * (or a mix of extents and holes), then we return -EAGAIN to make the
+        * caller fallback to a context where it can do a blocking (without
+        * NOWAIT) request. This way we avoid doing partial IO and returning
+        * success to the caller, which is not optimal for writes and for reads
+        * it can result in unexpected behaviour for an application.
+        *
+        * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
+        * iomap_dio_rw(), we can end up returning less data then what the caller
+        * asked for, resulting in an unexpected, and incorrect, short read.
+        * That is, the caller asked to read N bytes and we return less than that,
+        * which is wrong unless we are crossing EOF. This happens if we get a
+        * page fault error when trying to fault in pages for the buffer that is
+        * associated to the struct iov_iter passed to iomap_dio_rw(), and we
+        * have previously submitted bios for other extents in the range, in
+        * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
+        * those bios have completed by the time we get the page fault error,
+        * which we return back to our caller - we should only return EIOCBQUEUED
+        * after we have submitted bios for all the extents in the range.
+        */
+       if ((flags & IOMAP_NOWAIT) && len < length) {
+               free_extent_map(em);
+               ret = -EAGAIN;
+               goto unlock_err;
+       }
+
        if (write) {
                ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
                                                    start, len);
index 33eda39..8d47ec5 100644 (file)
@@ -1012,8 +1012,155 @@ out:
        return ret;
 }
 
+/*
+ * Defrag specific helper to get an extent map.
+ *
+ * Differences between this and btrfs_get_extent() are:
+ *
+ * - No extent_map will be added to inode->extent_tree
+ *   To reduce memory usage in the long run.
+ *
+ * - Extra optimization to skip file extents older than @newer_than
+ *   By using btrfs_search_forward() we can skip entire file ranges that
+ *   have extents created in past transactions, because btrfs_search_forward()
+ *   will not visit leaves and nodes with a generation smaller than given
+ *   minimal generation threshold (@newer_than).
+ *
+ * Return valid em if we find a file extent matching the requirement.
+ * Return NULL if we can not find a file extent matching the requirement.
+ *
+ * Return ERR_PTR() for error.
+ */
+static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
+                                           u64 start, u64 newer_than)
+{
+       struct btrfs_root *root = inode->root;
+       struct btrfs_file_extent_item *fi;
+       struct btrfs_path path = { 0 };
+       struct extent_map *em;
+       struct btrfs_key key;
+       u64 ino = btrfs_ino(inode);
+       int ret;
+
+       em = alloc_extent_map();
+       if (!em) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       key.objectid = ino;
+       key.type = BTRFS_EXTENT_DATA_KEY;
+       key.offset = start;
+
+       if (newer_than) {
+               ret = btrfs_search_forward(root, &key, &path, newer_than);
+               if (ret < 0)
+                       goto err;
+               /* Can't find anything newer */
+               if (ret > 0)
+                       goto not_found;
+       } else {
+               ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
+               if (ret < 0)
+                       goto err;
+       }
+       if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
+               /*
+                * If btrfs_search_slot() makes path to point beyond nritems,
+                * we should not have an empty leaf, as this inode must at
+                * least have its INODE_ITEM.
+                */
+               ASSERT(btrfs_header_nritems(path.nodes[0]));
+               path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
+       }
+       btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+       /* Perfect match, no need to go one slot back */
+       if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&
+           key.offset == start)
+               goto iterate;
+
+       /* We didn't find a perfect match, needs to go one slot back */
+       if (path.slots[0] > 0) {
+               btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+               if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
+                       path.slots[0]--;
+       }
+
+iterate:
+       /* Iterate through the path to find a file extent covering @start */
+       while (true) {
+               u64 extent_end;
+
+               if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
+                       goto next;
+
+               btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+
+               /*
+                * We may go one slot back to INODE_REF/XATTR item, then
+                * need to go forward until we reach an EXTENT_DATA.
+                * But we should still has the correct ino as key.objectid.
+                */
+               if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)
+                       goto next;
+
+               /* It's beyond our target range, definitely not extent found */
+               if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)
+                       goto not_found;
+
+               /*
+                *      |       |<- File extent ->|
+                *      \- start
+                *
+                * This means there is a hole between start and key.offset.
+                */
+               if (key.offset > start) {
+                       em->start = start;
+                       em->orig_start = start;
+                       em->block_start = EXTENT_MAP_HOLE;
+                       em->len = key.offset - start;
+                       break;
+               }
+
+               fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
+                                   struct btrfs_file_extent_item);
+               extent_end = btrfs_file_extent_end(&path);
+
+               /*
+                *      |<- file extent ->|     |
+                *                              \- start
+                *
+                * We haven't reached start, search next slot.
+                */
+               if (extent_end <= start)
+                       goto next;
+
+               /* Now this extent covers @start, convert it to em */
+               btrfs_extent_item_to_extent_map(inode, &path, fi, false, em);
+               break;
+next:
+               ret = btrfs_next_item(root, &path);
+               if (ret < 0)
+                       goto err;
+               if (ret > 0)
+                       goto not_found;
+       }
+       btrfs_release_path(&path);
+       return em;
+
+not_found:
+       btrfs_release_path(&path);
+       free_extent_map(em);
+       return NULL;
+
+err:
+       btrfs_release_path(&path);
+       free_extent_map(em);
+       return ERR_PTR(ret);
+}
+
 static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
-                                              bool locked)
+                                              u64 newer_than, bool locked)
 {
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -1028,6 +1175,20 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
        em = lookup_extent_mapping(em_tree, start, sectorsize);
        read_unlock(&em_tree->lock);
 
+       /*
+        * We can get a merged extent, in that case, we need to re-search
+        * tree to get the original em for defrag.
+        *
+        * If @newer_than is 0 or em::generation < newer_than, we can trust
+        * this em, as either we don't care about the generation, or the
+        * merged extent map will be rejected anyway.
+        */
+       if (em && test_bit(EXTENT_FLAG_MERGED, &em->flags) &&
+           newer_than && em->generation >= newer_than) {
+               free_extent_map(em);
+               em = NULL;
+       }
+
        if (!em) {
                struct extent_state *cached = NULL;
                u64 end = start + sectorsize - 1;
@@ -1035,7 +1196,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
                /* get the big lock and read metadata off disk */
                if (!locked)
                        lock_extent_bits(io_tree, start, end, &cached);
-               em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, sectorsize);
+               em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
                if (!locked)
                        unlock_extent_cached(io_tree, start, end, &cached);
 
@@ -1046,23 +1207,42 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
        return em;
 }
 
+static u32 get_extent_max_capacity(const struct extent_map *em)
+{
+       if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+               return BTRFS_MAX_COMPRESSED;
+       return BTRFS_MAX_EXTENT_SIZE;
+}
+
 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
                                     bool locked)
 {
        struct extent_map *next;
-       bool ret = true;
+       bool ret = false;
 
        /* this is the last extent */
        if (em->start + em->len >= i_size_read(inode))
                return false;
 
-       next = defrag_lookup_extent(inode, em->start + em->len, locked);
+       /*
+        * We want to check if the next extent can be merged with the current
+        * one, which can be an extent created in a past generation, so we pass
+        * a minimum generation of 0 to defrag_lookup_extent().
+        */
+       next = defrag_lookup_extent(inode, em->start + em->len, 0, locked);
+       /* No more em or hole */
        if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
-               ret = false;
-       else if ((em->block_start + em->block_len == next->block_start) &&
-                (em->block_len > SZ_128K && next->block_len > SZ_128K))
-               ret = false;
-
+               goto out;
+       if (test_bit(EXTENT_FLAG_PREALLOC, &next->flags))
+               goto out;
+       /*
+        * If the next extent is at its max capacity, defragging current extent
+        * makes no sense, as the total number of extents won't change.
+        */
+       if (next->len >= get_extent_max_capacity(em))
+               goto out;
+       ret = true;
+out:
        free_extent_map(next);
        return ret;
 }
@@ -1186,8 +1366,10 @@ struct defrag_target_range {
 static int defrag_collect_targets(struct btrfs_inode *inode,
                                  u64 start, u64 len, u32 extent_thresh,
                                  u64 newer_than, bool do_compress,
-                                 bool locked, struct list_head *target_list)
+                                 bool locked, struct list_head *target_list,
+                                 u64 *last_scanned_ret)
 {
+       bool last_is_target = false;
        u64 cur = start;
        int ret = 0;
 
@@ -1197,7 +1379,9 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
                bool next_mergeable = true;
                u64 range_len;
 
-               em = defrag_lookup_extent(&inode->vfs_inode, cur, locked);
+               last_is_target = false;
+               em = defrag_lookup_extent(&inode->vfs_inode, cur,
+                                         newer_than, locked);
                if (!em)
                        break;
 
@@ -1210,6 +1394,10 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
                if (em->generation < newer_than)
                        goto next;
 
+               /* This em is under writeback, no need to defrag */
+               if (em->generation == (u64)-1)
+                       goto next;
+
                /*
                 * Our start offset might be in the middle of an existing extent
                 * map, so take that into account.
@@ -1250,6 +1438,13 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
                if (range_len >= extent_thresh)
                        goto next;
 
+               /*
+                * Skip extents already at its max capacity, this is mostly for
+                * compressed extents, which max cap is only 128K.
+                */
+               if (em->len >= get_extent_max_capacity(em))
+                       goto next;
+
                next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
                                                          locked);
                if (!next_mergeable) {
@@ -1268,6 +1463,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
                }
 
 add:
+               last_is_target = true;
                range_len = min(extent_map_end(em), start + len) - cur;
                /*
                 * This one is a good target, check if it can be merged into
@@ -1311,6 +1507,17 @@ next:
                        kfree(entry);
                }
        }
+       if (!ret && last_scanned_ret) {
+               /*
+                * If the last extent is not a target, the caller can skip to
+                * the end of that extent.
+                * Otherwise, we can only go the end of the specified range.
+                */
+               if (!last_is_target)
+                       *last_scanned_ret = max(cur, *last_scanned_ret);
+               else
+                       *last_scanned_ret = max(start + len, *last_scanned_ret);
+       }
        return ret;
 }
 
@@ -1369,7 +1576,8 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
 }
 
 static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
-                           u32 extent_thresh, u64 newer_than, bool do_compress)
+                           u32 extent_thresh, u64 newer_than, bool do_compress,
+                           u64 *last_scanned_ret)
 {
        struct extent_state *cached_state = NULL;
        struct defrag_target_range *entry;
@@ -1415,7 +1623,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
         */
        ret = defrag_collect_targets(inode, start, len, extent_thresh,
                                     newer_than, do_compress, true,
-                                    &target_list);
+                                    &target_list, last_scanned_ret);
        if (ret < 0)
                goto unlock_extent;
 
@@ -1450,7 +1658,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
                              u64 start, u32 len, u32 extent_thresh,
                              u64 newer_than, bool do_compress,
                              unsigned long *sectors_defragged,
-                             unsigned long max_sectors)
+                             unsigned long max_sectors,
+                             u64 *last_scanned_ret)
 {
        const u32 sectorsize = inode->root->fs_info->sectorsize;
        struct defrag_target_range *entry;
@@ -1461,7 +1670,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
        BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
        ret = defrag_collect_targets(inode, start, len, extent_thresh,
                                     newer_than, do_compress, false,
-                                    &target_list);
+                                    &target_list, NULL);
        if (ret < 0)
                goto out;
 
@@ -1478,6 +1687,15 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
                        range_len = min_t(u32, range_len,
                                (max_sectors - *sectors_defragged) * sectorsize);
 
+               /*
+                * If defrag_one_range() has updated last_scanned_ret,
+                * our range may already be invalid (e.g. hole punched).
+                * Skip if our range is before last_scanned_ret, as there is
+                * no need to defrag the range anymore.
+                */
+               if (entry->start + range_len <= *last_scanned_ret)
+                       continue;
+
                if (ra)
                        page_cache_sync_readahead(inode->vfs_inode.i_mapping,
                                ra, NULL, entry->start >> PAGE_SHIFT,
@@ -1490,7 +1708,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
                 * accounting.
                 */
                ret = defrag_one_range(inode, entry->start, range_len,
-                                      extent_thresh, newer_than, do_compress);
+                                      extent_thresh, newer_than, do_compress,
+                                      last_scanned_ret);
                if (ret < 0)
                        break;
                *sectors_defragged += range_len >>
@@ -1501,6 +1720,8 @@ out:
                list_del_init(&entry->list);
                kfree(entry);
        }
+       if (ret >= 0)
+               *last_scanned_ret = max(*last_scanned_ret, start + len);
        return ret;
 }
 
@@ -1586,6 +1807,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 
        while (cur < last_byte) {
                const unsigned long prev_sectors_defragged = sectors_defragged;
+               u64 last_scanned = cur;
                u64 cluster_end;
 
                /* The cluster size 256K should always be page aligned */
@@ -1615,8 +1837,8 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
                        BTRFS_I(inode)->defrag_compress = compress_type;
                ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
                                cluster_end + 1 - cur, extent_thresh,
-                               newer_than, do_compress,
-                               &sectors_defragged, max_to_defrag);
+                               newer_than, do_compress, &sectors_defragged,
+                               max_to_defrag, &last_scanned);
 
                if (sectors_defragged > prev_sectors_defragged)
                        balance_dirty_pages_ratelimited(inode->i_mapping);
@@ -1624,11 +1846,12 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
                btrfs_inode_unlock(inode, 0);
                if (ret < 0)
                        break;
-               cur = cluster_end + 1;
+               cur = max(cluster_end + 1, last_scanned);
                if (ret > 0) {
                        ret = 0;
                        break;
                }
+               cond_resched();
        }
 
        if (ra_allocated)
index 0fb90cb..e6e28a9 100644 (file)
@@ -380,6 +380,17 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
                kunmap(cur_page);
                cur_in += LZO_LEN;
 
+               if (seg_len > lzo1x_worst_compress(PAGE_SIZE)) {
+                       /*
+                        * seg_len shouldn't be larger than we have allocated
+                        * for workspace->cbuf
+                        */
+                       btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
+                                       seg_len);
+                       ret = -EIO;
+                       goto out;
+               }
+
                /* Copy the compressed segment payload into workspace */
                copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);
 
index f12dc68..30d42ea 100644 (file)
@@ -1196,6 +1196,14 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
        if (!fs_info->quota_root)
                goto out;
 
+       /*
+        * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
+        * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
+        * to lock that mutex while holding a transaction handle and the rescan
+        * worker needs to commit a transaction.
+        */
+       mutex_unlock(&fs_info->qgroup_ioctl_lock);
+
        /*
         * Request qgroup rescan worker to complete and wait for it. This wait
         * must be done before transaction start for quota disable since it may
@@ -1203,7 +1211,6 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
         */
        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
-       mutex_unlock(&fs_info->qgroup_ioctl_lock);
 
        /*
         * 1 For the root item
index f546519..9d80548 100644 (file)
@@ -3960,6 +3960,19 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
        int rw = 0;
        int err = 0;
 
+       /*
+        * This only gets set if we had a half-deleted snapshot on mount.  We
+        * cannot allow relocation to start while we're still trying to clean up
+        * these pending deletions.
+        */
+       ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
+       if (ret)
+               return ret;
+
+       /* We may have been woken up by close_ctree, so bail if we're closing. */
+       if (btrfs_fs_closing(fs_info))
+               return -EINTR;
+
        bg = btrfs_lookup_block_group(fs_info, group_start);
        if (!bg)
                return -ENOENT;
index 3d68d2d..ca7426e 100644 (file)
@@ -278,6 +278,21 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
 
                WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state));
                if (btrfs_root_refs(&root->root_item) == 0) {
+                       struct btrfs_key drop_key;
+
+                       btrfs_disk_key_to_cpu(&drop_key, &root->root_item.drop_progress);
+                       /*
+                        * If we have a non-zero drop_progress then we know we
+                        * made it partly through deleting this snapshot, and
+                        * thus we need to make sure we block any balance from
+                        * happening until this snapshot is completely dropped.
+                        */
+                       if (drop_key.objectid != 0 || drop_key.type != 0 ||
+                           drop_key.offset != 0) {
+                               set_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
+                               set_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);
+                       }
+
                        set_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
                        btrfs_add_dead_root(root);
                }
index d8ccb62..201eb26 100644 (file)
@@ -4999,6 +4999,10 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                unlock_page(page);
+                               btrfs_err(fs_info,
+                       "send: IO error at offset %llu for inode %llu root %llu",
+                                       page_offset(page), sctx->cur_ino,
+                                       sctx->send_root->root_key.objectid);
                                put_page(page);
                                ret = -EIO;
                                break;
index 29bd8c7..ef7ae20 100644 (file)
@@ -736,7 +736,7 @@ void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
         * Since we own the page lock, no one else could touch subpage::writers
         * and we are safe to do several atomic operations without spinlock.
         */
-       if (atomic_read(&subpage->writers))
+       if (atomic_read(&subpage->writers) == 0)
                /* No writers, locked by plain lock_page() */
                return unlock_page(page);
 
index c43bbc7..1f1c25d 100644 (file)
@@ -854,7 +854,37 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
 static noinline void wait_for_commit(struct btrfs_transaction *commit,
                                     const enum btrfs_trans_state min_state)
 {
-       wait_event(commit->commit_wait, commit->state >= min_state);
+       struct btrfs_fs_info *fs_info = commit->fs_info;
+       u64 transid = commit->transid;
+       bool put = false;
+
+       while (1) {
+               wait_event(commit->commit_wait, commit->state >= min_state);
+               if (put)
+                       btrfs_put_transaction(commit);
+
+               if (min_state < TRANS_STATE_COMPLETED)
+                       break;
+
+               /*
+                * A transaction isn't really completed until all of the
+                * previous transactions are completed, but with fsync we can
+                * end up with SUPER_COMMITTED transactions before a COMPLETED
+                * transaction. Wait for those.
+                */
+
+               spin_lock(&fs_info->trans_lock);
+               commit = list_first_entry_or_null(&fs_info->trans_list,
+                                                 struct btrfs_transaction,
+                                                 list);
+               if (!commit || commit->transid > transid) {
+                       spin_unlock(&fs_info->trans_lock);
+                       break;
+               }
+               refcount_inc(&commit->use_count);
+               put = true;
+               spin_unlock(&fs_info->trans_lock);
+       }
 }
 
 int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
@@ -1319,6 +1349,32 @@ again:
        return 0;
 }
 
+/*
+ * If we had a pending drop we need to see if there are any others left in our
+ * dead roots list, and if not clear our bit and wake any waiters.
+ */
+void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
+{
+       /*
+        * We put the drop in progress roots at the front of the list, so if the
+        * first entry doesn't have UNFINISHED_DROP set we can wake everybody
+        * up.
+        */
+       spin_lock(&fs_info->trans_lock);
+       if (!list_empty(&fs_info->dead_roots)) {
+               struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
+                                                          struct btrfs_root,
+                                                          root_list);
+               if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
+                       spin_unlock(&fs_info->trans_lock);
+                       return;
+               }
+       }
+       spin_unlock(&fs_info->trans_lock);
+
+       btrfs_wake_unfinished_drop(fs_info);
+}
+
 /*
  * dead roots are old snapshots that need to be deleted.  This allocates
  * a dirty root struct and adds it into the list of dead roots that need to
@@ -1331,7 +1387,12 @@ void btrfs_add_dead_root(struct btrfs_root *root)
        spin_lock(&fs_info->trans_lock);
        if (list_empty(&root->root_list)) {
                btrfs_grab_root(root);
-               list_add_tail(&root->root_list, &fs_info->dead_roots);
+
+               /* We want to process the partially complete drops first. */
+               if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
+                       list_add(&root->root_list, &fs_info->dead_roots);
+               else
+                       list_add_tail(&root->root_list, &fs_info->dead_roots);
        }
        spin_unlock(&fs_info->trans_lock);
 }
@@ -1981,16 +2042,24 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
        /*
-        * We use writeback_inodes_sb here because if we used
+        * We use try_to_writeback_inodes_sb() here because if we used
         * btrfs_start_delalloc_roots we would deadlock with fs freeze.
         * Currently are holding the fs freeze lock, if we do an async flush
         * we'll do btrfs_join_transaction() and deadlock because we need to
         * wait for the fs freeze lock.  Using the direct flushing we benefit
         * from already being in a transaction and our join_transaction doesn't
         * have to re-take the fs freeze lock.
+        *
+        * Note that try_to_writeback_inodes_sb() will only trigger writeback
+        * if it can read lock sb->s_umount. It will always be able to lock it,
+        * except when the filesystem is being unmounted or being frozen, but in
+        * those cases sync_filesystem() is called, which results in calling
+        * writeback_inodes_sb() while holding a write lock on sb->s_umount.
+        * Note that we don't call writeback_inodes_sb() directly, because it
+        * will emit a warning if sb->s_umount is not locked.
         */
        if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
-               writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
+               try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
        return 0;
 }
 
index 9402d8d..ba8a982 100644 (file)
@@ -216,6 +216,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
 
 void btrfs_add_dead_root(struct btrfs_root *root);
 int btrfs_defrag_root(struct btrfs_root *root);
+void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
 void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
index 9fd145f..aae5697 100644 (file)
@@ -1682,6 +1682,7 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
         */
        for (slot = 0; slot < nritems; slot++) {
                u32 item_end_expected;
+               u64 item_data_end;
                int ret;
 
                btrfs_item_key_to_cpu(leaf, &key, slot);
@@ -1696,6 +1697,8 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
                        return -EUCLEAN;
                }
 
+               item_data_end = (u64)btrfs_item_offset(leaf, slot) +
+                               btrfs_item_size(leaf, slot);
                /*
                 * Make sure the offset and ends are right, remember that the
                 * item data starts at the end of the leaf and grows towards the
@@ -1706,11 +1709,10 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
                else
                        item_end_expected = btrfs_item_offset(leaf,
                                                                 slot - 1);
-               if (unlikely(btrfs_item_data_end(leaf, slot) != item_end_expected)) {
+               if (unlikely(item_data_end != item_end_expected)) {
                        generic_err(leaf, slot,
-                               "unexpected item end, have %u expect %u",
-                               btrfs_item_data_end(leaf, slot),
-                               item_end_expected);
+                               "unexpected item end, have %llu expect %u",
+                               item_data_end, item_end_expected);
                        return -EUCLEAN;
                }
 
@@ -1719,12 +1721,10 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
                 * just in case all the items are consistent to each other, but
                 * all point outside of the leaf.
                 */
-               if (unlikely(btrfs_item_data_end(leaf, slot) >
-                            BTRFS_LEAF_DATA_SIZE(fs_info))) {
+               if (unlikely(item_data_end > BTRFS_LEAF_DATA_SIZE(fs_info))) {
                        generic_err(leaf, slot,
-                       "slot end outside of leaf, have %u expect range [0, %u]",
-                               btrfs_item_data_end(leaf, slot),
-                               BTRFS_LEAF_DATA_SIZE(fs_info));
+                       "slot end outside of leaf, have %llu expect range [0, %u]",
+                               item_data_end, BTRFS_LEAF_DATA_SIZE(fs_info));
                        return -EUCLEAN;
                }
 
index 3ee014c..6bc8834 100644 (file)
@@ -1362,6 +1362,15 @@ again:
                                                 inode, name, namelen);
                        kfree(name);
                        iput(dir);
+                       /*
+                        * Whenever we need to check if a name exists or not, we
+                        * check the subvolume tree. So after an unlink we must
+                        * run delayed items, so that future checks for a name
+                        * during log replay see that the name does not exist
+                        * anymore.
+                        */
+                       if (!ret)
+                               ret = btrfs_run_delayed_items(trans);
                        if (ret)
                                goto out;
                        goto again;
@@ -1614,6 +1623,15 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                                 */
                                if (!ret && inode->i_nlink == 0)
                                        inc_nlink(inode);
+                               /*
+                                * Whenever we need to check if a name exists or
+                                * not, we check the subvolume tree. So after an
+                                * unlink we must run delayed items, so that future
+                                * checks for a name during log replay see that the
+                                * name does not exist anymore.
+                                */
+                               if (!ret)
+                                       ret = btrfs_run_delayed_items(trans);
                        }
                        if (ret < 0)
                                goto out;
@@ -4635,7 +4653,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
 
 /*
  * Log all prealloc extents beyond the inode's i_size to make sure we do not
- * lose them after doing a fast fsync and replaying the log. We scan the
+ * lose them after doing a full/fast fsync and replaying the log. We scan the
  * subvolume's root instead of iterating the inode's extent map tree because
  * otherwise we can log incorrect extent items based on extent map conversion.
  * That can happen due to the fact that extent maps are merged when they
@@ -5414,6 +5432,7 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
                                   struct btrfs_log_ctx *ctx,
                                   bool *need_log_inode_item)
 {
+       const u64 i_size = i_size_read(&inode->vfs_inode);
        struct btrfs_root *root = inode->root;
        int ins_start_slot = 0;
        int ins_nr = 0;
@@ -5434,13 +5453,21 @@ again:
                if (min_key->type > max_key->type)
                        break;
 
-               if (min_key->type == BTRFS_INODE_ITEM_KEY)
+               if (min_key->type == BTRFS_INODE_ITEM_KEY) {
                        *need_log_inode_item = false;
-
-               if ((min_key->type == BTRFS_INODE_REF_KEY ||
-                    min_key->type == BTRFS_INODE_EXTREF_KEY) &&
-                   inode->generation == trans->transid &&
-                   !recursive_logging) {
+               } else if (min_key->type == BTRFS_EXTENT_DATA_KEY &&
+                          min_key->offset >= i_size) {
+                       /*
+                        * Extents at and beyond eof are logged with
+                        * btrfs_log_prealloc_extents().
+                        * Only regular files have BTRFS_EXTENT_DATA_KEY keys,
+                        * and no keys greater than that, so bail out.
+                        */
+                       break;
+               } else if ((min_key->type == BTRFS_INODE_REF_KEY ||
+                           min_key->type == BTRFS_INODE_EXTREF_KEY) &&
+                          inode->generation == trans->transid &&
+                          !recursive_logging) {
                        u64 other_ino = 0;
                        u64 other_parent = 0;
 
@@ -5471,10 +5498,8 @@ again:
                                btrfs_release_path(path);
                                goto next_key;
                        }
-               }
-
-               /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
-               if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
+               } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
+                       /* Skip xattrs, logged later with btrfs_log_all_xattrs() */
                        if (ins_nr == 0)
                                goto next_slot;
                        ret = copy_items(trans, inode, dst_path, path,
@@ -5527,9 +5552,21 @@ next_key:
                        break;
                }
        }
-       if (ins_nr)
+       if (ins_nr) {
                ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
                                 ins_nr, inode_only, logged_isize);
+               if (ret)
+                       return ret;
+       }
+
+       if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) {
+               /*
+                * Release the path because otherwise we might attempt to double
+                * lock the same leaf with btrfs_log_prealloc_extents() below.
+                */
+               btrfs_release_path(path);
+               ret = btrfs_log_prealloc_extents(trans, inode, dst_path);
+       }
 
        return ret;
 }
index 51c968c..ae93cee 100644 (file)
@@ -254,7 +254,7 @@ static bool cachefiles_shorten_object(struct cachefiles_object *object,
                ret = cachefiles_inject_write_error();
                if (ret == 0)
                        ret = vfs_fallocate(file, FALLOC_FL_ZERO_RANGE,
-                                           new_size, dio_size);
+                                           new_size, dio_size - new_size);
                if (ret < 0) {
                        trace_cachefiles_io_error(object, file_inode(file), ret,
                                                  cachefiles_trace_fallocate_error);
index 463ebe3..cdce160 100644 (file)
@@ -396,11 +396,11 @@ static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const ch
        switch (state) {
        case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
                cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name);
-               cifs_reconnect(swnreg->tcon->ses->server, true);
+               cifs_mark_tcp_ses_conns_for_reconnect(swnreg->tcon->ses->server, true);
                break;
        case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
                cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name);
-               cifs_reconnect(swnreg->tcon->ses->server, true);
+               cifs_mark_tcp_ses_conns_for_reconnect(swnreg->tcon->ses->server, true);
                break;
        case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
                cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
@@ -498,7 +498,7 @@ static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *a
                goto unlock;
        }
 
-       cifs_reconnect(tcon->ses->server, false);
+       cifs_mark_tcp_ses_conns_for_reconnect(tcon->ses->server, false);
 
 unlock:
        mutex_unlock(&tcon->ses->server->srv_mutex);
index ee3aab3..bf861fe 100644 (file)
@@ -949,6 +949,9 @@ static void populate_new_aces(char *nacl_base,
                pnntace = (struct cifs_ace *) (nacl_base + nsize);
                nsize += setup_special_mode_ACE(pnntace, nmode);
                num_aces++;
+               pnntace = (struct cifs_ace *) (nacl_base + nsize);
+               nsize += setup_authusers_ACE(pnntace);
+               num_aces++;
                goto set_size;
        }
 
@@ -1297,7 +1300,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
 
                if (uid_valid(uid)) { /* chown */
                        uid_t id;
-                       nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
+                       nowner_sid_ptr = kzalloc(sizeof(struct cifs_sid),
                                                                GFP_KERNEL);
                        if (!nowner_sid_ptr) {
                                rc = -ENOMEM;
@@ -1326,7 +1329,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
                }
                if (gid_valid(gid)) { /* chgrp */
                        gid_t id;
-                       ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
+                       ngroup_sid_ptr = kzalloc(sizeof(struct cifs_sid),
                                                                GFP_KERNEL);
                        if (!ngroup_sid_ptr) {
                                rc = -ENOMEM;
@@ -1613,7 +1616,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
        nsecdesclen = secdesclen;
        if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
                if (mode_from_sid)
-                       nsecdesclen += sizeof(struct cifs_ace);
+                       nsecdesclen += 2 * sizeof(struct cifs_ace);
                else /* cifsacl */
                        nsecdesclen += 5 * sizeof(struct cifs_ace);
        } else { /* chown */
index 199edac..082c214 100644 (file)
@@ -919,6 +919,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
 
 out_super:
        deactivate_locked_super(sb);
+       return root;
 out:
        if (cifs_sb) {
                kfree(cifs_sb->prepath);
index 0b742bd..053cb44 100644 (file)
@@ -175,11 +175,6 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
        struct TCP_Server_Info *pserver;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
-       struct mid_q_entry *mid, *nmid;
-       struct list_head retry_list;
-
-       server->maxBuf = 0;
-       server->max_read = 0;
 
        /*
         * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
@@ -219,6 +214,16 @@ next_session:
                spin_unlock(&ses->chan_lock);
        }
        spin_unlock(&cifs_tcp_ses_lock);
+}
+
+static void
+cifs_abort_connection(struct TCP_Server_Info *server)
+{
+       struct mid_q_entry *mid, *nmid;
+       struct list_head retry_list;
+
+       server->maxBuf = 0;
+       server->max_read = 0;
 
        /* do not want to be sending data on a socket we are freeing */
        cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
@@ -310,6 +315,8 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
 
        cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
 
+       cifs_abort_connection(server);
+
        do {
                try_to_freeze();
                mutex_lock(&server->srv_mutex);
@@ -434,6 +441,8 @@ reconnect_dfs_server(struct TCP_Server_Info *server,
 
        cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
 
+       cifs_abort_connection(server);
+
        do {
                try_to_freeze();
                mutex_lock(&server->srv_mutex);
@@ -639,6 +648,7 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
 
                if (server->tcpStatus == CifsNeedReconnect) {
                        spin_unlock(&cifs_tcp_ses_lock);
+                       cifs_reconnect(server, false);
                        return -ECONNABORTED;
                }
                spin_unlock(&cifs_tcp_ses_lock);
@@ -2340,10 +2350,19 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
                if (ses->server->posix_ext_supported) {
                        tcon->posix_extensions = true;
                        pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
-               } else {
+               } else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
+                   (strcmp(ses->server->vals->version_string,
+                    SMB3ANY_VERSION_STRING) == 0) ||
+                   (strcmp(ses->server->vals->version_string,
+                    SMBDEFAULT_VERSION_STRING) == 0)) {
                        cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
                        rc = -EOPNOTSUPP;
                        goto out_fail;
+               } else {
+                       cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
+                               "disabled but required for POSIX extensions\n");
+                       rc = -EOPNOTSUPP;
+                       goto out_fail;
                }
        }
 
index dd96437..831f424 100644 (file)
@@ -1355,7 +1355,7 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach
        }
 
        cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
-       cifs_reconnect(tcon->ses->server, true);
+       cifs_mark_tcp_ses_conns_for_reconnect(tcon->ses->server, true);
 }
 
 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
index 7ec35f3..a92e9ee 100644 (file)
@@ -149,7 +149,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
        fsparam_u32("echo_interval", Opt_echo_interval),
        fsparam_u32("max_credits", Opt_max_credits),
        fsparam_u32("handletimeout", Opt_handletimeout),
-       fsparam_u32("snapshot", Opt_snapshot),
+       fsparam_u64("snapshot", Opt_snapshot),
        fsparam_u32("max_channels", Opt_max_channels),
 
        /* Mount options which take string value */
@@ -1078,7 +1078,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
                ctx->echo_interval = result.uint_32;
                break;
        case Opt_snapshot:
-               ctx->snapshot_time = result.uint_32;
+               ctx->snapshot_time = result.uint_64;
                break;
        case Opt_max_credits:
                if (result.uint_32 < 20 || result.uint_32 > 60000) {
index 5723d50..32f478c 100644 (file)
@@ -127,11 +127,6 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
        struct cifs_server_iface *ifaces = NULL;
        size_t iface_count;
 
-       if (ses->server->dialect < SMB30_PROT_ID) {
-               cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
-               return 0;
-       }
-
        spin_lock(&ses->chan_lock);
 
        new_chan_count = old_chan_count = ses->chan_count;
@@ -145,6 +140,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
                return 0;
        }
 
+       if (ses->server->dialect < SMB30_PROT_ID) {
+               spin_unlock(&ses->chan_lock);
+               cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+               return 0;
+       }
+
        if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
                ses->chan_max = 1;
                spin_unlock(&ses->chan_lock);
index 8272c91..b2fb7bd 100644 (file)
@@ -228,9 +228,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
        spin_unlock(&GlobalMid_Lock);
 
        if (reconnect) {
-               spin_lock(&cifs_tcp_ses_lock);
-               server->tcpStatus = CifsNeedReconnect;
-               spin_unlock(&cifs_tcp_ses_lock);
+               cifs_mark_tcp_ses_conns_for_reconnect(server, false);
        }
 
        return mid;
index 8540f7c..a4c3e02 100644 (file)
@@ -430,10 +430,7 @@ unmask:
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB
                 */
-               spin_lock(&cifs_tcp_ses_lock);
-               if (server->tcpStatus != CifsExiting)
-                       server->tcpStatus = CifsNeedReconnect;
-               spin_unlock(&cifs_tcp_ses_lock);
+               cifs_mark_tcp_ses_conns_for_reconnect(server, false);
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->conn_id, server->hostname);
        }
index 7d8b72d..9d486fb 100644 (file)
@@ -175,11 +175,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
                                switch (handler->flags) {
                                case XATTR_CIFS_NTSD_FULL:
                                        aclflags = (CIFS_ACL_OWNER |
+                                                   CIFS_ACL_GROUP |
                                                    CIFS_ACL_DACL |
                                                    CIFS_ACL_SACL);
                                        break;
                                case XATTR_CIFS_NTSD:
                                        aclflags = (CIFS_ACL_OWNER |
+                                                   CIFS_ACL_GROUP |
                                                    CIFS_ACL_DACL);
                                        break;
                                case XATTR_CIFS_ACL:
index d3cd2a9..d1f9d26 100644 (file)
  */
 DEFINE_SPINLOCK(configfs_dirent_lock);
 
+/*
+ * All of link_obj/unlink_obj/link_group/unlink_group require that
+ * subsys->su_mutex is held.
+ * But parent configfs_subsystem is NULL when config_item is root.
+ * Use this mutex when config_item is root.
+ */
+static DEFINE_MUTEX(configfs_subsystem_mutex);
+
 static void configfs_d_iput(struct dentry * dentry,
                            struct inode * inode)
 {
@@ -1859,7 +1867,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
                group->cg_item.ci_name = group->cg_item.ci_namebuf;
 
        sd = root->d_fsdata;
+       mutex_lock(&configfs_subsystem_mutex);
        link_group(to_config_group(sd->s_element), group);
+       mutex_unlock(&configfs_subsystem_mutex);
 
        inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
 
@@ -1884,7 +1894,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
        inode_unlock(d_inode(root));
 
        if (err) {
+               mutex_lock(&configfs_subsystem_mutex);
                unlink_group(group);
+               mutex_unlock(&configfs_subsystem_mutex);
                configfs_release_fs();
        }
        put_fragment(frag);
@@ -1931,7 +1943,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
 
        dput(dentry);
 
+       mutex_lock(&configfs_subsystem_mutex);
        unlink_group(group);
+       mutex_unlock(&configfs_subsystem_mutex);
        configfs_release_fs();
 }
 
index b8272fb..5aa2cf2 100644 (file)
@@ -325,7 +325,7 @@ struct erofs_inode {
                        unsigned char  z_algorithmtype[2];
                        unsigned char  z_logical_clusterbits;
                        unsigned long  z_tailextent_headlcn;
-                       unsigned int   z_idataoff;
+                       erofs_off_t    z_idataoff;
                        unsigned short z_idata_size;
                };
 #endif /* CONFIG_EROFS_FS_ZIP */
index 57edef1..7d2e692 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/task_work.h>
 #include <linux/ima.h>
 #include <linux/swap.h>
+#include <linux/kmemleak.h>
 
 #include <linux/atomic.h>
 
@@ -119,6 +120,11 @@ static struct ctl_table fs_stat_sysctls[] = {
 static int __init init_fs_stat_sysctls(void)
 {
        register_sysctl_init("fs", fs_stat_sysctls);
+       if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
+               struct ctl_table_header *hdr;
+               hdr = register_sysctl_mount_point("fs/binfmt_misc");
+               kmemleak_not_leak(hdr);
+       }
        return 0;
 }
 fs_initcall(init_fs_stat_sysctls);
index 3e718cf..8c39a85 100644 (file)
@@ -704,10 +704,11 @@ static int gfs2_release(struct inode *inode, struct file *file)
        kfree(file->private_data);
        file->private_data = NULL;
 
-       if (gfs2_rs_active(&ip->i_res))
-               gfs2_rs_delete(ip, &inode->i_writecount);
-       if (file->f_mode & FMODE_WRITE)
+       if (file->f_mode & FMODE_WRITE) {
+               if (gfs2_rs_active(&ip->i_res))
+                       gfs2_rs_delete(ip, &inode->i_writecount);
                gfs2_qa_put(ip);
+       }
        return 0;
 }
 
index b7ab843..6b23399 100644 (file)
@@ -301,9 +301,6 @@ void gfs2_glock_queue_put(struct gfs2_glock *gl)
 
 void gfs2_glock_put(struct gfs2_glock *gl)
 {
-       /* last put could call sleepable dlm api */
-       might_sleep();
-
        if (lockref_put_or_lock(&gl->gl_lockref))
                return;
 
index 2e04f71..4715980 100644 (file)
@@ -4567,6 +4567,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
                } else {
                        list_add_tail(&buf->list, &(*head)->list);
                }
+               cond_resched();
        }
 
        return i ? i : -ENOMEM;
@@ -5228,7 +5229,6 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
                min_ret = iov_iter_count(&msg.msg_iter);
 
        ret = sock_recvmsg(sock, &msg, flags);
-out_free:
        if (ret < min_ret) {
                if (ret == -EAGAIN && force_nonblock)
                        return -EAGAIN;
@@ -5236,9 +5236,9 @@ out_free:
                        ret = -EINTR;
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+out_free:
                req_set_fail(req);
        }
-
        __io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
        return 0;
 }
@@ -7694,7 +7694,7 @@ static int io_run_task_work_sig(void)
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
                                          struct io_wait_queue *iowq,
-                                         signed long *timeout)
+                                         ktime_t timeout)
 {
        int ret;
 
@@ -7706,8 +7706,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
        if (test_bit(0, &ctx->check_cq_overflow))
                return 1;
 
-       *timeout = schedule_timeout(*timeout);
-       return !*timeout ? -ETIME : 1;
+       if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
+               return -ETIME;
+       return 1;
 }
 
 /*
@@ -7720,7 +7721,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 {
        struct io_wait_queue iowq;
        struct io_rings *rings = ctx->rings;
-       signed long timeout = MAX_SCHEDULE_TIMEOUT;
+       ktime_t timeout = KTIME_MAX;
        int ret;
 
        do {
@@ -7736,7 +7737,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
                if (get_timespec64(&ts, uts))
                        return -EFAULT;
-               timeout = timespec64_to_jiffies(&ts);
+               timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
        }
 
        if (sig) {
@@ -7768,7 +7769,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                }
                prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
-               ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+               ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
                finish_wait(&ctx->cq_wait, &iowq.wq);
                cond_resched();
        } while (ret > 0);
@@ -7925,7 +7926,15 @@ static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
                ret = wait_for_completion_interruptible(&data->done);
                if (!ret) {
                        mutex_lock(&ctx->uring_lock);
-                       break;
+                       if (atomic_read(&data->refs) > 0) {
+                               /*
+                                * it has been revived by another thread while
+                                * we were unlocked
+                                */
+                               mutex_unlock(&ctx->uring_lock);
+                       } else {
+                               break;
+                       }
                }
 
                atomic_inc(&data->refs);
@@ -8933,10 +8942,9 @@ static void io_mem_free(void *ptr)
 
 static void *io_mem_alloc(size_t size)
 {
-       gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
-                               __GFP_NORETRY | __GFP_ACCOUNT;
+       gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
 
-       return (void *) __get_free_pages(gfp_flags, get_order(size));
+       return (void *) __get_free_pages(gfp, get_order(size));
 }
 
 static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
index dc3d061..911444d 100644 (file)
@@ -29,6 +29,7 @@
 #include "mgmt/user_config.h"
 #include "crypto_ctx.h"
 #include "transport_ipc.h"
+#include "../smbfs_common/arc4.h"
 
 /*
  * Fixed format data defining GSS header and fixed string
@@ -336,6 +337,29 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
                                nt_len - CIFS_ENCPWD_SIZE,
                                domain_name, conn->ntlmssp.cryptkey);
        kfree(domain_name);
+
+       /* The recovered secondary session key */
+       if (conn->ntlmssp.client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) {
+               struct arc4_ctx *ctx_arc4;
+               unsigned int sess_key_off, sess_key_len;
+
+               sess_key_off = le32_to_cpu(authblob->SessionKey.BufferOffset);
+               sess_key_len = le16_to_cpu(authblob->SessionKey.Length);
+
+               if (blob_len < (u64)sess_key_off + sess_key_len)
+                       return -EINVAL;
+
+               ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
+               if (!ctx_arc4)
+                       return -ENOMEM;
+
+               cifs_arc4_setkey(ctx_arc4, sess->sess_key,
+                                SMB2_NTLMV2_SESSKEY_SIZE);
+               cifs_arc4_crypt(ctx_arc4, sess->sess_key,
+                               (char *)authblob + sess_key_off, sess_key_len);
+               kfree_sensitive(ctx_arc4);
+       }
+
        return ret;
 }
 
@@ -408,6 +432,9 @@ ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
            (cflags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
                flags |= NTLMSSP_NEGOTIATE_EXTENDED_SEC;
 
+       if (cflags & NTLMSSP_NEGOTIATE_KEY_XCH)
+               flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+
        chgblob->NegotiateFlags = cpu_to_le32(flags);
        len = strlen(ksmbd_netbios_name());
        name = kmalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
index 1866c81..67e8e28 100644 (file)
@@ -2688,7 +2688,7 @@ int smb2_open(struct ksmbd_work *work)
                                        (struct create_posix *)context;
                                if (le16_to_cpu(context->DataOffset) +
                                    le32_to_cpu(context->DataLength) <
-                                   sizeof(struct create_posix)) {
+                                   sizeof(struct create_posix) - 4) {
                                        rc = -EINVAL;
                                        goto err_out1;
                                }
@@ -3422,9 +3422,9 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
                goto free_conv_name;
        }
 
-       struct_sz = readdir_info_level_struct_sz(info_level);
-       next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
-                                 KSMBD_DIR_INFO_ALIGNMENT);
+       struct_sz = readdir_info_level_struct_sz(info_level) - 1 + conv_len;
+       next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT);
+       d_info->last_entry_off_align = next_entry_offset - struct_sz;
 
        if (next_entry_offset > d_info->out_buf_len) {
                d_info->out_buf_len = 0;
@@ -3976,6 +3976,7 @@ int smb2_query_dir(struct ksmbd_work *work)
                ((struct file_directory_info *)
                ((char *)rsp->Buffer + d_info.last_entry_offset))
                ->NextEntryOffset = 0;
+               d_info.data_count -= d_info.last_entry_off_align;
 
                rsp->StructureSize = cpu_to_le16(9);
                rsp->OutputBufferOffset = cpu_to_le16(72);
@@ -6126,13 +6127,26 @@ static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work,
                                        __le16 ChannelInfoOffset,
                                        __le16 ChannelInfoLength)
 {
+       unsigned int i, ch_count;
+
        if (work->conn->dialect == SMB30_PROT_ID &&
            Channel != SMB2_CHANNEL_RDMA_V1)
                return -EINVAL;
 
-       if (ChannelInfoOffset == 0 ||
-           le16_to_cpu(ChannelInfoLength) < sizeof(*desc))
+       ch_count = le16_to_cpu(ChannelInfoLength) / sizeof(*desc);
+       if (ksmbd_debug_types & KSMBD_DEBUG_RDMA) {
+               for (i = 0; i < ch_count; i++) {
+                       pr_info("RDMA r/w request %#x: token %#x, length %#x\n",
+                               i,
+                               le32_to_cpu(desc[i].token),
+                               le32_to_cpu(desc[i].length));
+               }
+       }
+       if (ch_count != 1) {
+               ksmbd_debug(RDMA, "RDMA multiple buffer descriptors %d are not supported yet\n",
+                           ch_count);
                return -EINVAL;
+       }
 
        work->need_invalidate_rkey =
                (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
@@ -6185,9 +6199,15 @@ int smb2_read(struct ksmbd_work *work)
 
        if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
            req->Channel == SMB2_CHANNEL_RDMA_V1) {
+               unsigned int ch_offset = le16_to_cpu(req->ReadChannelInfoOffset);
+
+               if (ch_offset < offsetof(struct smb2_read_req, Buffer)) {
+                       err = -EINVAL;
+                       goto out;
+               }
                err = smb2_set_remote_key_for_rdma(work,
                                                   (struct smb2_buffer_desc_v1 *)
-                                                  &req->Buffer[0],
+                                                  ((char *)req + ch_offset),
                                                   req->Channel,
                                                   req->ReadChannelInfoOffset,
                                                   req->ReadChannelInfoLength);
@@ -6428,11 +6448,16 @@ int smb2_write(struct ksmbd_work *work)
 
        if (req->Channel == SMB2_CHANNEL_RDMA_V1 ||
            req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
-               if (req->Length != 0 || req->DataOffset != 0)
-                       return -EINVAL;
+               unsigned int ch_offset = le16_to_cpu(req->WriteChannelInfoOffset);
+
+               if (req->Length != 0 || req->DataOffset != 0 ||
+                   ch_offset < offsetof(struct smb2_write_req, Buffer)) {
+                       err = -EINVAL;
+                       goto out;
+               }
                err = smb2_set_remote_key_for_rdma(work,
                                                   (struct smb2_buffer_desc_v1 *)
-                                                  &req->Buffer[0],
+                                                  ((char *)req + ch_offset),
                                                   req->Channel,
                                                   req->WriteChannelInfoOffset,
                                                   req->WriteChannelInfoLength);
index ef7f42b..9a7e211 100644 (file)
@@ -308,14 +308,17 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
        for (i = 0; i < 2; i++) {
                struct kstat kstat;
                struct ksmbd_kstat ksmbd_kstat;
+               struct dentry *dentry;
 
                if (!dir->dot_dotdot[i]) { /* fill dot entry info */
                        if (i == 0) {
                                d_info->name = ".";
                                d_info->name_len = 1;
+                               dentry = dir->filp->f_path.dentry;
                        } else {
                                d_info->name = "..";
                                d_info->name_len = 2;
+                               dentry = dir->filp->f_path.dentry->d_parent;
                        }
 
                        if (!match_pattern(d_info->name, d_info->name_len,
@@ -327,7 +330,7 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
                        ksmbd_kstat.kstat = &kstat;
                        ksmbd_vfs_fill_dentry_attrs(work,
                                                    user_ns,
-                                                   dir->filp->f_path.dentry->d_parent,
+                                                   dentry,
                                                    &ksmbd_kstat);
                        rc = fn(conn, info_level, d_info, &ksmbd_kstat);
                        if (rc)
index 3c1ec1a..ba5a22b 100644 (file)
@@ -80,7 +80,7 @@ static int smb_direct_max_fragmented_recv_size = 1024 * 1024;
 /*  The maximum single-message size which can be received */
 static int smb_direct_max_receive_size = 8192;
 
-static int smb_direct_max_read_write_size = 1048512;
+static int smb_direct_max_read_write_size = 524224;
 
 static int smb_direct_max_outstanding_rw_ops = 8;
 
index adf94a4..8c37aaf 100644 (file)
@@ -47,6 +47,7 @@ struct ksmbd_dir_info {
        int             last_entry_offset;
        bool            hide_dot_file;
        int             flags;
+       int             last_entry_off_align;
 };
 
 struct ksmbd_readdir_data {
index 40b994a..de6fae8 100644 (file)
@@ -469,6 +469,24 @@ void mnt_drop_write_file(struct file *file)
 }
 EXPORT_SYMBOL(mnt_drop_write_file);
 
+/**
+ * mnt_hold_writers - prevent write access to the given mount
+ * @mnt: mnt to prevent write access to
+ *
+ * Prevents write access to @mnt if there are no active writers for @mnt.
+ * This function needs to be called and return successfully before changing
+ * properties of @mnt that need to remain stable for callers with write access
+ * to @mnt.
+ *
+ * After this functions has been called successfully callers must pair it with
+ * a call to mnt_unhold_writers() in order to stop preventing write access to
+ * @mnt.
+ *
+ * Context: This function expects lock_mount_hash() to be held serializing
+ *          setting MNT_WRITE_HOLD.
+ * Return: On success 0 is returned.
+ *        On error, -EBUSY is returned.
+ */
 static inline int mnt_hold_writers(struct mount *mnt)
 {
        mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -500,6 +518,18 @@ static inline int mnt_hold_writers(struct mount *mnt)
        return 0;
 }
 
+/**
+ * mnt_unhold_writers - stop preventing write access to the given mount
+ * @mnt: mnt to stop preventing write access to
+ *
+ * Stop preventing write access to @mnt allowing callers to gain write access
+ * to @mnt again.
+ *
+ * This function can only be called after a successful call to
+ * mnt_hold_writers().
+ *
+ * Context: This function expects lock_mount_hash() to be held.
+ */
 static inline void mnt_unhold_writers(struct mount *mnt)
 {
        /*
index f18e80f..d1f3422 100644 (file)
@@ -177,6 +177,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
        INIT_LIST_HEAD(&clp->cl_superblocks);
        clp->cl_rpcclient = ERR_PTR(-EINVAL);
 
+       clp->cl_flags = cl_init->init_flags;
        clp->cl_proto = cl_init->proto;
        clp->cl_nconnect = cl_init->nconnect;
        clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1;
@@ -423,7 +424,6 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
                        list_add_tail(&new->cl_share_link,
                                        &nn->nfs_client_list);
                        spin_unlock(&nn->nfs_client_lock);
-                       new->cl_flags = cl_init->init_flags;
                        return rpc_ops->init_client(new, cl_init);
                }
 
index 848f3b8..75cb1cb 100644 (file)
@@ -80,6 +80,7 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir
                ctx->dir_cookie = 0;
                ctx->dup_cookie = 0;
                ctx->page_index = 0;
+               ctx->eof = false;
                spin_lock(&dir->i_lock);
                if (list_empty(&nfsi->open_files) &&
                    (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
@@ -168,6 +169,7 @@ struct nfs_readdir_descriptor {
        unsigned int    cache_entry_index;
        signed char duped;
        bool plus;
+       bool eob;
        bool eof;
 };
 
@@ -867,7 +869,8 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
 
                status = nfs_readdir_page_filler(desc, entry, pages, pglen,
                                                 arrays, narrays);
-       } while (!status && nfs_readdir_page_needs_filling(page));
+       } while (!status && nfs_readdir_page_needs_filling(page) &&
+               page_mapping(page));
 
        nfs_readdir_free_pages(pages, array_size);
 out:
@@ -988,7 +991,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
                ent = &array->array[i];
                if (!dir_emit(desc->ctx, ent->name, ent->name_len,
                    nfs_compat_user_ino64(ent->ino), ent->d_type)) {
-                       desc->eof = true;
+                       desc->eob = true;
                        break;
                }
                memcpy(desc->verf, verf, sizeof(desc->verf));
@@ -1004,7 +1007,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
                        desc->duped = 1;
        }
        if (array->page_is_eof)
-               desc->eof = true;
+               desc->eof = !desc->eob;
 
        kunmap(desc->page);
        dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %llu\n",
@@ -1041,12 +1044,13 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
                goto out;
 
        desc->page_index = 0;
+       desc->cache_entry_index = 0;
        desc->last_cookie = desc->dir_cookie;
        desc->duped = 0;
 
        status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz);
 
-       for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
+       for (i = 0; !desc->eob && i < sz && arrays[i]; i++) {
                desc->page = arrays[i];
                nfs_do_filldir(desc, verf);
        }
@@ -1105,9 +1109,15 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
        desc->duped = dir_ctx->duped;
        page_index = dir_ctx->page_index;
        desc->attr_gencount = dir_ctx->attr_gencount;
+       desc->eof = dir_ctx->eof;
        memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf));
        spin_unlock(&file->f_lock);
 
+       if (desc->eof) {
+               res = 0;
+               goto out_free;
+       }
+
        if (test_and_clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags) &&
            list_is_singular(&nfsi->open_files))
                invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1);
@@ -1141,7 +1151,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
 
                nfs_do_filldir(desc, nfsi->cookieverf);
                nfs_readdir_page_unlock_and_put_cached(desc);
-       } while (!desc->eof);
+       } while (!desc->eob && !desc->eof);
 
        spin_lock(&file->f_lock);
        dir_ctx->dir_cookie = desc->dir_cookie;
@@ -1149,9 +1159,10 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
        dir_ctx->duped = desc->duped;
        dir_ctx->attr_gencount = desc->attr_gencount;
        dir_ctx->page_index = desc->page_index;
+       dir_ctx->eof = desc->eof;
        memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf));
        spin_unlock(&file->f_lock);
-
+out_free:
        kfree(desc);
 
 out:
@@ -1193,6 +1204,7 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
                if (offset == 0)
                        memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf));
                dir_ctx->duped = 0;
+               dir_ctx->eof = false;
        }
        spin_unlock(&filp->f_lock);
        return offset;
@@ -1998,14 +2010,14 @@ no_open:
        if (!res) {
                inode = d_inode(dentry);
                if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
-                   !S_ISDIR(inode->i_mode))
+                   !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
                        res = ERR_PTR(-ENOTDIR);
                else if (inode && S_ISREG(inode->i_mode))
                        res = ERR_PTR(-EOPENSTALE);
        } else if (!IS_ERR(res)) {
                inode = d_inode(res);
                if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
-                   !S_ISDIR(inode->i_mode)) {
+                   !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
                        dput(res);
                        res = ERR_PTR(-ENOTDIR);
                } else if (inode && S_ISREG(inode->i_mode)) {
index a918c3a..d96baa4 100644 (file)
@@ -853,12 +853,9 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
        }
 
        /* Flush out writes to the server in order to update c/mtime.  */
-       if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
-                       S_ISREG(inode->i_mode)) {
-               err = filemap_write_and_wait(inode->i_mapping);
-               if (err)
-                       goto out;
-       }
+       if ((request_mask & (STATX_CTIME | STATX_MTIME)) &&
+           S_ISREG(inode->i_mode))
+               filemap_write_and_wait(inode->i_mapping);
 
        /*
         * We may force a getattr if the user cares about atime.
index b18f31b..0e0db6c 100644 (file)
@@ -1229,8 +1229,7 @@ nfs4_update_changeattr_locked(struct inode *inode,
                                NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
                                NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
                                NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
-                               NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR |
-                               NFS_INO_REVAL_PAGECACHE;
+                               NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
                nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
        }
        nfsi->attrtimeo_timestamp = jiffies;
@@ -8032,7 +8031,8 @@ static int _nfs41_proc_get_locations(struct nfs_server *server,
 
 /**
  * nfs4_proc_get_locations - discover locations for a migrated FSID
- * @inode: inode on FSID that is migrating
+ * @server: pointer to nfs_server to process
+ * @fhandle: pointer to the kernel NFS client file handle
  * @locations: result of query
  * @page: buffer
  * @cred: credential to use for this operation
index 8ef53f6..936eebd 100644 (file)
@@ -150,13 +150,17 @@ nfsd3_proc_read(struct svc_rqst *rqstp)
        unsigned int len;
        int v;
 
-       argp->count = min_t(u32, argp->count, max_blocksize);
-
        dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
                                SVCFH_fmt(&argp->fh),
                                (unsigned long) argp->count,
                                (unsigned long long) argp->offset);
 
+       argp->count = min_t(u32, argp->count, max_blocksize);
+       if (argp->offset > (u64)OFFSET_MAX)
+               argp->offset = (u64)OFFSET_MAX;
+       if (argp->offset + argp->count > (u64)OFFSET_MAX)
+               argp->count = (u64)OFFSET_MAX - argp->offset;
+
        v = 0;
        len = argp->count;
        resp->pages = rqstp->rq_next_page;
@@ -199,6 +203,11 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
                                (unsigned long long) argp->offset,
                                argp->stable? " stable" : "");
 
+       resp->status = nfserr_fbig;
+       if (argp->offset > (u64)OFFSET_MAX ||
+           argp->offset + argp->len > (u64)OFFSET_MAX)
+               return rpc_success;
+
        fh_copy(&resp->fh, &argp->fh);
        resp->committed = argp->stable;
        nvecs = svc_fill_write_vector(rqstp, &argp->payload);
@@ -651,15 +660,9 @@ nfsd3_proc_commit(struct svc_rqst *rqstp)
                                argp->count,
                                (unsigned long long) argp->offset);
 
-       if (argp->offset > NFS_OFFSET_MAX) {
-               resp->status = nfserr_inval;
-               goto out;
-       }
-
        fh_copy(&resp->fh, &argp->fh);
        resp->status = nfsd_commit(rqstp, &resp->fh, argp->offset,
                                   argp->count, resp->verf);
-out:
        return rpc_success;
 }
 
index 7c45ba4..0293b8d 100644 (file)
@@ -254,7 +254,7 @@ svcxdr_decode_sattr3(struct svc_rqst *rqstp, struct xdr_stream *xdr,
                if (xdr_stream_decode_u64(xdr, &newsize) < 0)
                        return false;
                iap->ia_valid |= ATTR_SIZE;
-               iap->ia_size = min_t(u64, newsize, NFS_OFFSET_MAX);
+               iap->ia_size = newsize;
        }
        if (xdr_stream_decode_u32(xdr, &set_it) < 0)
                return false;
@@ -1060,7 +1060,7 @@ svcxdr_encode_entry3_common(struct nfsd3_readdirres *resp, const char *name,
                return false;
        /* cookie */
        resp->cookie_offset = dirlist->len;
-       if (xdr_stream_encode_u64(xdr, NFS_OFFSET_MAX) < 0)
+       if (xdr_stream_encode_u64(xdr, OFFSET_MAX) < 0)
                return false;
 
        return true;
index ed1ee25..b207c76 100644 (file)
@@ -782,12 +782,16 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        __be32 status;
 
        read->rd_nf = NULL;
-       if (read->rd_offset >= OFFSET_MAX)
-               return nfserr_inval;
 
        trace_nfsd_read_start(rqstp, &cstate->current_fh,
                              read->rd_offset, read->rd_length);
 
+       read->rd_length = min_t(u32, read->rd_length, svc_max_payload(rqstp));
+       if (read->rd_offset > (u64)OFFSET_MAX)
+               read->rd_offset = (u64)OFFSET_MAX;
+       if (read->rd_offset + read->rd_length > (u64)OFFSET_MAX)
+               read->rd_length = (u64)OFFSET_MAX - read->rd_offset;
+
        /*
         * If we do a zero copy read, then a client will see read data
         * that reflects the state of the file *after* performing the
@@ -1018,8 +1022,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        unsigned long cnt;
        int nvecs;
 
-       if (write->wr_offset >= OFFSET_MAX)
-               return nfserr_inval;
+       if (write->wr_offset > (u64)OFFSET_MAX ||
+           write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX)
+               return nfserr_fbig;
 
        cnt = write->wr_buflen;
        trace_nfsd_write_start(rqstp, &cstate->current_fh,
index 899de43..714a3a3 100644 (file)
@@ -3495,7 +3495,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
        p = xdr_reserve_space(xdr, 3*4 + namlen);
        if (!p)
                goto fail;
-       p = xdr_encode_hyper(p, NFS_OFFSET_MAX);    /* offset of next entry */
+       p = xdr_encode_hyper(p, OFFSET_MAX);        /* offset of next entry */
        p = xdr_encode_array(p, name, namlen);      /* name length & name */
 
        nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen);
@@ -3986,10 +3986,8 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
        }
        xdr_commit_encode(xdr);
 
-       maxcount = svc_max_payload(resp->rqstp);
-       maxcount = min_t(unsigned long, maxcount,
+       maxcount = min_t(unsigned long, read->rd_length,
                         (xdr->buf->buflen - xdr->buf->len));
-       maxcount = min_t(unsigned long, maxcount, read->rd_length);
 
        if (file->f_op->splice_read &&
            test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
@@ -4826,10 +4824,8 @@ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
                return nfserr_resource;
        xdr_commit_encode(xdr);
 
-       maxcount = svc_max_payload(resp->rqstp);
-       maxcount = min_t(unsigned long, maxcount,
+       maxcount = min_t(unsigned long, read->rd_length,
                         (xdr->buf->buflen - xdr->buf->len));
-       maxcount = min_t(unsigned long, maxcount, read->rd_length);
        count    = maxcount;
 
        eof = read->rd_offset >= i_size_read(file_inode(file));
index c4cf563..5889db6 100644 (file)
@@ -306,14 +306,14 @@ TRACE_EVENT(nfsd_export_update,
 DECLARE_EVENT_CLASS(nfsd_io_class,
        TP_PROTO(struct svc_rqst *rqstp,
                 struct svc_fh  *fhp,
-                loff_t         offset,
-                unsigned long  len),
+                u64            offset,
+                u32            len),
        TP_ARGS(rqstp, fhp, offset, len),
        TP_STRUCT__entry(
                __field(u32, xid)
                __field(u32, fh_hash)
-               __field(loff_t, offset)
-               __field(unsigned long, len)
+               __field(u64, offset)
+               __field(u32, len)
        ),
        TP_fast_assign(
                __entry->xid = be32_to_cpu(rqstp->rq_xid);
@@ -321,7 +321,7 @@ DECLARE_EVENT_CLASS(nfsd_io_class,
                __entry->offset = offset;
                __entry->len = len;
        ),
-       TP_printk("xid=0x%08x fh_hash=0x%08x offset=%lld len=%lu",
+       TP_printk("xid=0x%08x fh_hash=0x%08x offset=%llu len=%u",
                  __entry->xid, __entry->fh_hash,
                  __entry->offset, __entry->len)
 )
@@ -330,8 +330,8 @@ DECLARE_EVENT_CLASS(nfsd_io_class,
 DEFINE_EVENT(nfsd_io_class, nfsd_##name,       \
        TP_PROTO(struct svc_rqst *rqstp,        \
                 struct svc_fh  *fhp,           \
-                loff_t         offset,         \
-                unsigned long  len),           \
+                u64            offset,         \
+                u32            len),           \
        TP_ARGS(rqstp, fhp, offset, len))
 
 DEFINE_NFSD_IO_EVENT(read_start);
index 99c2b9d..91600e7 100644 (file)
@@ -435,6 +435,10 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
                        .ia_size        = iap->ia_size,
                };
 
+               host_err = -EFBIG;
+               if (iap->ia_size < 0)
+                       goto out_unlock;
+
                host_err = notify_change(&init_user_ns, dentry, &size_attr, NULL);
                if (host_err)
                        goto out_unlock;
@@ -1110,42 +1114,61 @@ out:
 }
 
 #ifdef CONFIG_NFSD_V3
-/*
- * Commit all pending writes to stable storage.
+/**
+ * nfsd_commit - Commit pending writes to stable storage
+ * @rqstp: RPC request being processed
+ * @fhp: NFS filehandle
+ * @offset: raw offset from beginning of file
+ * @count: raw count of bytes to sync
+ * @verf: filled in with the server's current write verifier
  *
- * Note: we only guarantee that data that lies within the range specified
- * by the 'offset' and 'count' parameters will be synced.
+ * Note: we guarantee that data that lies within the range specified
+ * by the 'offset' and 'count' parameters will be synced. The server
+ * is permitted to sync data that lies outside this range at the
+ * same time.
  *
  * Unfortunately we cannot lock the file to make sure we return full WCC
  * data to the client, as locking happens lower down in the filesystem.
+ *
+ * Return values:
+ *   An nfsstat value in network byte order.
  */
 __be32
-nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
-               loff_t offset, unsigned long count, __be32 *verf)
+nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, u64 offset,
+           u32 count, __be32 *verf)
 {
+       u64                     maxbytes;
+       loff_t                  start, end;
        struct nfsd_net         *nn;
        struct nfsd_file        *nf;
-       loff_t                  end = LLONG_MAX;
-       __be32                  err = nfserr_inval;
-
-       if (offset < 0)
-               goto out;
-       if (count != 0) {
-               end = offset + (loff_t)count - 1;
-               if (end < offset)
-                       goto out;
-       }
+       __be32                  err;
 
        err = nfsd_file_acquire(rqstp, fhp,
                        NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &nf);
        if (err)
                goto out;
+
+       /*
+        * Convert the client-provided (offset, count) range to a
+        * (start, end) range. If the client-provided range falls
+        * outside the maximum file size of the underlying FS,
+        * clamp the sync range appropriately.
+        */
+       start = 0;
+       end = LLONG_MAX;
+       maxbytes = (u64)fhp->fh_dentry->d_sb->s_maxbytes;
+       if (offset < maxbytes) {
+               start = offset;
+               if (count && (offset + count - 1 < maxbytes))
+                       end = offset + count - 1;
+       }
+
        nn = net_generic(nf->nf_net, nfsd_net_id);
        if (EX_ISSYNC(fhp->fh_export)) {
                errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
                int err2;
 
-               err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
+               err2 = vfs_fsync_range(nf->nf_file, start, end, 0);
                switch (err2) {
                case 0:
                        nfsd_copy_write_verifier(verf, nn);
index 9f56dcb..2c43d10 100644 (file)
@@ -74,8 +74,8 @@ __be32                do_nfsd_create(struct svc_rqst *, struct svc_fh *,
                                char *name, int len, struct iattr *attrs,
                                struct svc_fh *res, int createmode,
                                u32 *verifier, bool *truncp, bool *created);
-__be32         nfsd_commit(struct svc_rqst *, struct svc_fh *,
-                               loff_t, unsigned long, __be32 *verf);
+__be32         nfsd_commit(struct svc_rqst *rqst, struct svc_fh *fhp,
+                               u64 offset, u32 count, __be32 *verf);
 #endif /* CONFIG_NFSD_V3 */
 #ifdef CONFIG_NFSD_V4
 __be32         nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
index 18f8c3a..f46060e 100644 (file)
@@ -309,7 +309,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 
        name = arch_vma_name(vma);
        if (!name) {
-               const char *anon_name;
+               struct anon_vma_name *anon_name;
 
                if (!mm) {
                        name = "[vdso]";
@@ -327,10 +327,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
                        goto done;
                }
 
-               anon_name = vma_anon_name(vma);
+               anon_name = anon_vma_name(vma);
                if (anon_name) {
                        seq_pad(m, ' ');
-                       seq_printf(m, "[anon:%s]", anon_name);
+                       seq_printf(m, "[anon:%s]", anon_name->name);
                }
        }
 
@@ -440,7 +440,8 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
 }
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
-               bool compound, bool young, bool dirty, bool locked)
+               bool compound, bool young, bool dirty, bool locked,
+               bool migration)
 {
        int i, nr = compound ? compound_nr(page) : 1;
        unsigned long size = nr * PAGE_SIZE;
@@ -467,8 +468,15 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
         * page_count(page) == 1 guarantees the page is mapped exactly once.
         * If any subpage of the compound page mapped with PTE it would elevate
         * page_count().
+        *
+        * The page_mapcount() is called to get a snapshot of the mapcount.
+        * Without holding the page lock this snapshot can be slightly wrong as
+        * we cannot always read the mapcount atomically.  It is not safe to
+        * call page_mapcount() even with PTL held if the page is not mapped,
+        * especially for migration entries.  Treat regular migration entries
+        * as mapcount == 1.
         */
-       if (page_count(page) == 1) {
+       if ((page_count(page) == 1) || migration) {
                smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
                        locked, true);
                return;
@@ -517,6 +525,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
        struct vm_area_struct *vma = walk->vma;
        bool locked = !!(vma->vm_flags & VM_LOCKED);
        struct page *page = NULL;
+       bool migration = false;
 
        if (pte_present(*pte)) {
                page = vm_normal_page(vma, addr, *pte);
@@ -536,8 +545,11 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                        } else {
                                mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
                        }
-               } else if (is_pfn_swap_entry(swpent))
+               } else if (is_pfn_swap_entry(swpent)) {
+                       if (is_migration_entry(swpent))
+                               migration = true;
                        page = pfn_swap_entry_to_page(swpent);
+               }
        } else {
                smaps_pte_hole_lookup(addr, walk);
                return;
@@ -546,7 +558,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
        if (!page)
                return;
 
-       smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
+       smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
+                     locked, migration);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -557,6 +570,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
        struct vm_area_struct *vma = walk->vma;
        bool locked = !!(vma->vm_flags & VM_LOCKED);
        struct page *page = NULL;
+       bool migration = false;
 
        if (pmd_present(*pmd)) {
                /* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -564,8 +578,10 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
        } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
                swp_entry_t entry = pmd_to_swp_entry(*pmd);
 
-               if (is_migration_entry(entry))
+               if (is_migration_entry(entry)) {
+                       migration = true;
                        page = pfn_swap_entry_to_page(entry);
+               }
        }
        if (IS_ERR_OR_NULL(page))
                return;
@@ -577,7 +593,9 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                /* pass */;
        else
                mss->file_thp += HPAGE_PMD_SIZE;
-       smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
+
+       smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
+                     locked, migration);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -1378,6 +1396,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 {
        u64 frame = 0, flags = 0;
        struct page *page = NULL;
+       bool migration = false;
 
        if (pte_present(pte)) {
                if (pm->show_pfn)
@@ -1399,13 +1418,14 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
                        frame = swp_type(entry) |
                                (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
                flags |= PM_SWAP;
+               migration = is_migration_entry(entry);
                if (is_pfn_swap_entry(entry))
                        page = pfn_swap_entry_to_page(entry);
        }
 
        if (page && !PageAnon(page))
                flags |= PM_FILE;
-       if (page && page_mapcount(page) == 1)
+       if (page && !migration && page_mapcount(page) == 1)
                flags |= PM_MMAP_EXCLUSIVE;
        if (vma->vm_flags & VM_SOFTDIRTY)
                flags |= PM_SOFT_DIRTY;
@@ -1421,8 +1441,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
        spinlock_t *ptl;
        pte_t *pte, *orig_pte;
        int err = 0;
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       bool migration = false;
+
        ptl = pmd_trans_huge_lock(pmdp, vma);
        if (ptl) {
                u64 flags = 0, frame = 0;
@@ -1461,11 +1482,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                        if (pmd_swp_uffd_wp(pmd))
                                flags |= PM_UFFD_WP;
                        VM_BUG_ON(!is_pmd_migration_entry(pmd));
+                       migration = is_migration_entry(entry);
                        page = pfn_swap_entry_to_page(entry);
                }
 #endif
 
-               if (page && page_mapcount(page) == 1)
+               if (page && !migration && page_mapcount(page) == 1)
                        flags |= PM_MMAP_EXCLUSIVE;
 
                for (; addr != end; addr += PAGE_SIZE) {
@@ -1575,7 +1597,8 @@ static const struct mm_walk_ops pagemap_ops = {
  * Bits 5-54  swap offset if swapped
  * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
  * Bit  56    page exclusively mapped
- * Bits 57-60 zero
+ * Bit  57    pte is uffd-wp write-protected
+ * Bits 58-60 zero
  * Bit  61    page is file-page or shared-anon
  * Bit  62    page swapped
  * Bit  63    page present
index bafc02b..de72527 100644 (file)
@@ -264,7 +264,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
                        if (!gid_valid(gid))
                                return -EINVAL;
                        opts->gid = gid;
-                       set_gid(tracefs_mount->mnt_root, gid);
                        break;
                case Opt_mode:
                        if (match_octal(&args[0], &option))
@@ -291,7 +290,9 @@ static int tracefs_apply_options(struct super_block *sb)
        inode->i_mode |= opts->mode;
 
        inode->i_uid = opts->uid;
-       inode->i_gid = opts->gid;
+
+       /* Set all the group ids to the mount option */
+       set_gid(sb->s_root, opts->gid);
 
        return 0;
 }
index e26b101..8e03b3d 100644 (file)
@@ -878,7 +878,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                                 new_flags, vma->anon_vma,
                                 vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
-                                NULL_VM_UFFD_CTX, vma_anon_name(vma));
+                                NULL_VM_UFFD_CTX, anon_vma_name(vma));
                if (prev)
                        vma = prev;
                else
@@ -1438,7 +1438,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
                                 ((struct vm_userfaultfd_ctx){ ctx }),
-                                vma_anon_name(vma));
+                                anon_vma_name(vma));
                if (prev) {
                        vma = prev;
                        goto next;
@@ -1615,7 +1615,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                prev = vma_merge(mm, prev, start, vma_end, new_flags,
                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
-                                NULL_VM_UFFD_CTX, vma_anon_name(vma));
+                                NULL_VM_UFFD_CTX, anon_vma_name(vma));
                if (prev) {
                        vma = prev;
                        goto next;
index 4c0dee7..d84714e 100644 (file)
@@ -1753,6 +1753,11 @@ xfs_remount_ro(
        };
        int                     error;
 
+       /* Flush all the dirty data to disk. */
+       error = sync_filesystem(mp->m_super);
+       if (error)
+               return error;
+
        /*
         * Cancel background eofb scanning so it cannot race with the final
         * log force+buftarg wait and deadlock the remount.
@@ -1831,8 +1836,6 @@ xfs_fs_reconfigure(
        if (error)
                return error;
 
-       sync_filesystem(mp->m_super);
-
        /* inode32 -> inode64 */
        if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
                mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
index 7d57063..29ff6b8 100644 (file)
 #define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
 #define DRA7_L3_INSTR_CLKCTRL  DRA7_CLKCTRL_INDEX(0x28)
 
-/* iva clocks */
-#define DRA7_IVA_CLKCTRL       DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_SL2IF_CLKCTRL     DRA7_CLKCTRL_INDEX(0x28)
-
 /* dss clocks */
 #define DRA7_DSS_CORE_CLKCTRL  DRA7_CLKCTRL_INDEX(0x20)
 #define DRA7_BB2D_CLKCTRL      DRA7_CLKCTRL_INDEX(0x30)
 
-/* gpu clocks */
-#define DRA7_GPU_CLKCTRL       DRA7_CLKCTRL_INDEX(0x20)
-
 /* l3init clocks */
 #define DRA7_MMC1_CLKCTRL      DRA7_CLKCTRL_INDEX(0x28)
 #define DRA7_MMC2_CLKCTRL      DRA7_CLKCTRL_INDEX(0x30)
 #define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
 #define DRA7_L3INSTR_L3_INSTR_CLKCTRL  DRA7_CLKCTRL_INDEX(0x28)
 
+/* iva clocks */
+#define DRA7_IVA_CLKCTRL               DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_SL2IF_CLKCTRL             DRA7_CLKCTRL_INDEX(0x28)
+
 /* dss clocks */
 #define DRA7_DSS_DSS_CORE_CLKCTRL      DRA7_CLKCTRL_INDEX(0x20)
 #define DRA7_DSS_BB2D_CLKCTRL  DRA7_CLKCTRL_INDEX(0x30)
 
+/* gpu clocks */
+#define DRA7_GPU_CLKCTRL               DRA7_CLKCTRL_INDEX(0x20)
+
 /* l3init clocks */
 #define DRA7_L3INIT_MMC1_CLKCTRL       DRA7_CLKCTRL_INDEX(0x28)
 #define DRA7_L3INIT_MMC2_CLKCTRL       DRA7_CLKCTRL_INDEX(0x30)
index 199e47e..21292b5 100644 (file)
@@ -324,12 +324,12 @@ enum {
        ATA_LOG_NCQ_NON_DATA    = 0x12,
        ATA_LOG_NCQ_SEND_RECV   = 0x13,
        ATA_LOG_IDENTIFY_DEVICE = 0x30,
+       ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
 
        /* Identify device log pages: */
        ATA_LOG_SECURITY          = 0x06,
        ATA_LOG_SATA_SETTINGS     = 0x08,
        ATA_LOG_ZONED_INFORMATION = 0x09,
-       ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
 
        /* Identify device SATA settings log:*/
        ATA_LOG_DEVSLP_OFFSET     = 0x30,
index f35aea9..16b4703 100644 (file)
@@ -748,7 +748,8 @@ extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
 
 bool __must_check blk_get_queue(struct request_queue *);
 extern void blk_put_queue(struct request_queue *);
-extern void blk_set_queue_dying(struct request_queue *);
+
+void blk_mark_disk_dead(struct gendisk *disk);
 
 #ifdef CONFIG_BLOCK
 /*
index fa517ae..3121d1f 100644 (file)
@@ -209,11 +209,9 @@ static inline bool map_value_has_timer(const struct bpf_map *map)
 static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
 {
        if (unlikely(map_value_has_spin_lock(map)))
-               *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
-                       (struct bpf_spin_lock){};
+               memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
        if (unlikely(map_value_has_timer(map)))
-               *(struct bpf_timer *)(dst + map->timer_off) =
-                       (struct bpf_timer){};
+               memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
 }
 
 /* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
@@ -224,7 +222,8 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
        if (unlikely(map_value_has_spin_lock(map))) {
                s_off = map->spin_lock_off;
                s_sz = sizeof(struct bpf_spin_lock);
-       } else if (unlikely(map_value_has_timer(map))) {
+       }
+       if (unlikely(map_value_has_timer(map))) {
                t_off = map->timer_off;
                t_sz = sizeof(struct bpf_timer);
        }
@@ -1793,6 +1792,11 @@ struct bpf_core_ctx {
 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
                   int relo_idx, void *insn);
 
+static inline bool unprivileged_ebpf_enabled(void)
+{
+       return !sysctl_unprivileged_bpf_disabled;
+}
+
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -2012,6 +2016,12 @@ bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
 {
        return NULL;
 }
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+       return false;
+}
+
 #endif /* CONFIG_BPF_SYSCALL */
 
 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
index 429dceb..0f7fd20 100644 (file)
@@ -117,14 +117,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  */
 #define __stringify_label(n) #n
 
-#define __annotate_reachable(c) ({                                     \
-       asm volatile(__stringify_label(c) ":\n\t"                       \
-                    ".pushsection .discard.reachable\n\t"              \
-                    ".long " __stringify_label(c) "b - .\n\t"          \
-                    ".popsection\n\t" : : "i" (c));                    \
-})
-#define annotate_reachable() __annotate_reachable(__COUNTER__)
-
 #define __annotate_unreachable(c) ({                                   \
        asm volatile(__stringify_label(c) ":\n\t"                       \
                     ".pushsection .discard.unreachable\n\t"            \
@@ -133,24 +125,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 })
 #define annotate_unreachable() __annotate_unreachable(__COUNTER__)
 
-#define ASM_UNREACHABLE                                                        \
-       "999:\n\t"                                                      \
-       ".pushsection .discard.unreachable\n\t"                         \
-       ".long 999b - .\n\t"                                            \
+#define ASM_REACHABLE                                                  \
+       "998:\n\t"                                                      \
+       ".pushsection .discard.reachable\n\t"                           \
+       ".long 998b - .\n\t"                                            \
        ".popsection\n\t"
 
 /* Annotate a C jump table to allow objtool to follow the code flow */
 #define __annotate_jump_table __section(".rodata..c_jump_table")
 
 #else
-#define annotate_reachable()
 #define annotate_unreachable()
+# define ASM_REACHABLE
 #define __annotate_jump_table
 #endif
 
-#ifndef ASM_UNREACHABLE
-# define ASM_UNREACHABLE
-#endif
 #ifndef unreachable
 # define unreachable() do {            \
        annotate_unreachable();         \
index 1ab29e6..3522a27 100644 (file)
@@ -382,6 +382,9 @@ struct cpufreq_driver {
        int             (*suspend)(struct cpufreq_policy *policy);
        int             (*resume)(struct cpufreq_policy *policy);
 
+       /* Will be called after the driver is fully initialized */
+       void            (*ready)(struct cpufreq_policy *policy);
+
        struct freq_attr **attr;
 
        /* platform specific boost support code */
index f565a89..fe2e017 100644 (file)
@@ -1262,6 +1262,7 @@ struct hv_device {
        struct vmbus_channel *channel;
        struct kset          *channels_kset;
        struct device_dma_parameters dma_parms;
+       u64 dma_mask;
 
        /* place holder to keep track of the dir for hv device in debugfs */
        struct dentry *debug_dir;
index 4b5e367..f49e642 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/atomic.h>
 #include <linux/static_key.h>
 
+extern unsigned long kfence_sample_interval;
+
 /*
  * We allocate an even number of pages, as it simplifies calculations to map
  * address to metadata indices; effectively, the very first page serves as an
index b72d751..0abbd68 100644 (file)
@@ -219,7 +219,7 @@ struct obj_cgroup {
        struct mem_cgroup *memcg;
        atomic_t nr_charged_bytes;
        union {
-               struct list_head list;
+               struct list_head list; /* protected by objcg_lock */
                struct rcu_head rcu;
        };
 };
@@ -315,7 +315,8 @@ struct mem_cgroup {
 #ifdef CONFIG_MEMCG_KMEM
        int kmemcg_id;
        struct obj_cgroup __rcu *objcg;
-       struct list_head objcg_list; /* list of inherited objcgs */
+       /* list of inherited objcgs, protected by objcg_lock */
+       struct list_head objcg_list;
 #endif
 
        MEMCG_PADDING(_pad2_);
index 213cc56..5744a3f 100644 (file)
@@ -2626,7 +2626,7 @@ static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-       struct mempolicy *, struct vm_userfaultfd_ctx, const char *);
+       struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
        unsigned long addr, int new_below);
@@ -3372,11 +3372,12 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
 
 #ifdef CONFIG_ANON_VMA_NAME
 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
-                         unsigned long len_in, const char *name);
+                         unsigned long len_in,
+                         struct anon_vma_name *anon_name);
 #else
 static inline int
 madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
-                     unsigned long len_in, const char *name) {
+                     unsigned long len_in, struct anon_vma_name *anon_name) {
        return 0;
 }
 #endif
index b725839..cf90b1f 100644 (file)
@@ -140,50 +140,91 @@ static __always_inline void del_page_from_lru_list(struct page *page,
 
 #ifdef CONFIG_ANON_VMA_NAME
 /*
- * mmap_lock should be read-locked when calling vma_anon_name() and while using
- * the returned pointer.
+ * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
+ * either keep holding the lock while using the returned pointer or it should
+ * raise anon_vma_name refcount before releasing the lock.
  */
-extern const char *vma_anon_name(struct vm_area_struct *vma);
+extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
+extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
+extern void anon_vma_name_free(struct kref *kref);
 
-/*
- * mmap_lock should be read-locked for orig_vma->vm_mm.
- * mmap_lock should be write-locked for new_vma->vm_mm or new_vma should be
- * isolated.
- */
-extern void dup_vma_anon_name(struct vm_area_struct *orig_vma,
-                             struct vm_area_struct *new_vma);
+/* mmap_lock should be read-locked */
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
+{
+       if (anon_name)
+               kref_get(&anon_name->kref);
+}
 
-/*
- * mmap_lock should be write-locked or vma should have been isolated under
- * write-locked mmap_lock protection.
- */
-extern void free_vma_anon_name(struct vm_area_struct *vma);
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
+{
+       if (anon_name)
+               kref_put(&anon_name->kref, anon_vma_name_free);
+}
 
-/* mmap_lock should be read-locked */
-static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
-                                        const char *name)
+static inline
+struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
+{
+       /* Prevent anon_name refcount saturation early on */
+       if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
+               anon_vma_name_get(anon_name);
+               return anon_name;
+
+       }
+       return anon_vma_name_alloc(anon_name->name);
+}
+
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+                                    struct vm_area_struct *new_vma)
+{
+       struct anon_vma_name *anon_name = anon_vma_name(orig_vma);
+
+       if (anon_name)
+               new_vma->anon_name = anon_vma_name_reuse(anon_name);
+}
+
+static inline void free_anon_vma_name(struct vm_area_struct *vma)
 {
-       const char *vma_name = vma_anon_name(vma);
+       /*
+        * Not using anon_vma_name because it generates a warning if mmap_lock
+        * is not held, which might be the case here.
+        */
+       if (!vma->vm_file)
+               anon_vma_name_put(vma->anon_name);
+}
 
-       /* either both NULL, or pointers to same string */
-       if (vma_name == name)
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+                                   struct anon_vma_name *anon_name2)
+{
+       if (anon_name1 == anon_name2)
                return true;
 
-       return name && vma_name && !strcmp(name, vma_name);
+       return anon_name1 && anon_name2 &&
+               !strcmp(anon_name1->name, anon_name2->name);
 }
+
 #else /* CONFIG_ANON_VMA_NAME */
-static inline const char *vma_anon_name(struct vm_area_struct *vma)
+static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
 {
        return NULL;
 }
-static inline void dup_vma_anon_name(struct vm_area_struct *orig_vma,
-                             struct vm_area_struct *new_vma) {}
-static inline void free_vma_anon_name(struct vm_area_struct *vma) {}
-static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
-                                        const char *name)
+
+static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
+{
+       return NULL;
+}
+
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+                                    struct vm_area_struct *new_vma) {}
+static inline void free_anon_vma_name(struct vm_area_struct *vma) {}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+                                   struct anon_vma_name *anon_name2)
 {
        return true;
 }
+
 #endif  /* CONFIG_ANON_VMA_NAME */
 
 static inline void init_tlb_flush_pending(struct mm_struct *mm)
index 5140e5f..0f54987 100644 (file)
@@ -416,7 +416,10 @@ struct vm_area_struct {
                        struct rb_node rb;
                        unsigned long rb_subtree_last;
                } shared;
-               /* Serialized by mmap_sem. */
+               /*
+                * Serialized by mmap_sem. Never use directly because it is
+                * valid only when vm_file is NULL. Use anon_vma_name instead.
+                */
                struct anon_vma_name *anon_name;
        };
 
index e490b84..8b5a314 100644 (file)
@@ -2158,7 +2158,7 @@ struct net_device {
        struct netdev_queue     *_tx ____cacheline_aligned_in_smp;
        unsigned int            num_tx_queues;
        unsigned int            real_num_tx_queues;
-       struct Qdisc            *qdisc;
+       struct Qdisc __rcu      *qdisc;
        unsigned int            tx_queue_len;
        spinlock_t              tx_global_lock;
 
index b4dd96e..e6487a6 100644 (file)
@@ -101,7 +101,11 @@ static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
        nf_hook_state_init(&state, NF_NETDEV_EGRESS,
                           NFPROTO_NETDEV, dev, NULL, NULL,
                           dev_net(dev), NULL);
+
+       /* nf assumes rcu_read_lock, not just read_lock_bh */
+       rcu_read_lock();
        ret = nf_hook_slow(skb, &state, e, 0);
+       rcu_read_unlock();
 
        if (ret == 1) {
                return skb;
index 0dc7ad3..b06375e 100644 (file)
@@ -36,14 +36,6 @@ static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *sourc
        memcpy(target->data, source->data, source->size);
 }
 
-
-/*
- * This is really a general kernel constant, but since nothing like
- * this is defined in the kernel headers, I have to do it here.
- */
-#define NFS_OFFSET_MAX         ((__s64)((~(__u64)0) >> 1))
-
-
 enum nfs3_stable_how {
        NFS_UNSTABLE = 0,
        NFS_DATA_SYNC = 1,
index 02aa493..68f81d8 100644 (file)
@@ -107,6 +107,7 @@ struct nfs_open_dir_context {
        __u64 dup_cookie;
        pgoff_t page_index;
        signed char duped;
+       bool eof;
 };
 
 /*
index 959e0bd..7547015 100644 (file)
@@ -12,6 +12,7 @@
 #define NVME_TCP_DISC_PORT     8009
 #define NVME_TCP_ADMIN_CCSZ    SZ_8K
 #define NVME_TCP_DIGEST_LENGTH 4
+#define NVME_TCP_MIN_MAXH2CDATA 4096
 
 enum nvme_tcp_pfv {
        NVME_TCP_PFV_1_0 = 0x0,
index 98efb7b..c9a3ac9 100644 (file)
@@ -70,7 +70,8 @@ struct nvmem_keepout {
  * @word_size: Minimum read/write access granularity.
  * @stride:    Minimum read/write access stride.
  * @priv:      User context passed to read/write callbacks.
- * @wp-gpio:   Write protect pin
+ * @wp-gpio:   Write protect pin
+ * @ignore_wp:  Write Protect pin is managed by the provider.
  *
  * Note: A default "nvmem<id>" name will be assigned to the device if
  * no name is specified in its configuration. In such case "<id>" is
@@ -92,6 +93,7 @@ struct nvmem_config {
        enum nvmem_type         type;
        bool                    read_only;
        bool                    root_only;
+       bool                    ignore_wp;
        struct device_node      *of_node;
        bool                    no_of_node;
        nvmem_reg_read_t        reg_read;
index c35f396..373003a 100644 (file)
@@ -308,6 +308,11 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
        return false;
 }
 
+static inline bool rfkill_soft_blocked(struct rfkill *rfkill)
+{
+       return false;
+}
+
 static inline enum rfkill_type rfkill_find_type(const char *name)
 {
        return RFKILL_TYPE_ALL;
index b9198a1..e84e54d 100644 (file)
@@ -54,8 +54,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_post_fork(struct task_struct *p,
-                           struct kernel_clone_args *kargs);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_post_fork(struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
 
 void __noreturn do_task_dead(void);
index 37bde99..5b6193f 100644 (file)
@@ -660,8 +660,7 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
-                                  __alloc_size(1);
+extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
 #define kmalloc_track_caller(size, flags) \
        __kmalloc_track_caller(size, flags, _RET_IP_)
 
index 3e8ecde..300273f 100644 (file)
@@ -497,14 +497,14 @@ extern void ksys_sync_helper(void);
 
 /* drivers/base/power/wakeup.c */
 extern bool events_check_enabled;
-extern unsigned int pm_wakeup_irq;
 extern suspend_state_t pm_suspend_target_state;
 
 extern bool pm_wakeup_pending(void);
 extern void pm_system_wakeup(void);
 extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(unsigned int irq_number);
 extern void pm_system_irq_wakeup(unsigned int irq_number);
+extern unsigned int pm_wakeup_irq(void);
 extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
 extern void pm_wakep_autosleep_enabled(bool set);
index 70c069a..dcea51f 100644 (file)
@@ -699,6 +699,8 @@ event_triggers_post_call(struct trace_event_file *file,
 
 bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
 
+bool __trace_trigger_soft_disabled(struct trace_event_file *file);
+
 /**
  * trace_trigger_soft_disabled - do triggers and test if soft disabled
  * @file: The file pointer of the event to test
@@ -708,20 +710,20 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
  * triggers that require testing the fields, it will return true,
  * otherwise false.
  */
-static inline bool
+static __always_inline bool
 trace_trigger_soft_disabled(struct trace_event_file *file)
 {
        unsigned long eflags = file->flags;
 
-       if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
-               if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
-                       event_triggers_call(file, NULL, NULL, NULL);
-               if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
-                       return true;
-               if (eflags & EVENT_FILE_FL_PID_FILTER)
-                       return trace_event_ignore_this_pid(file);
-       }
-       return false;
+       if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
+                              EVENT_FILE_FL_SOFT_DISABLED |
+                              EVENT_FILE_FL_PID_FILTER))))
+               return false;
+
+       if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
+               return false;
+
+       return __trace_trigger_soft_disabled(file);
 }
 
 #ifdef CONFIG_BPF_EVENTS
index 2de442e..721089b 100644 (file)
@@ -401,18 +401,24 @@ static inline int vdpa_reset(struct vdpa_device *vdev)
        return ret;
 }
 
-static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features, bool locked)
+static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features)
 {
        const struct vdpa_config_ops *ops = vdev->config;
        int ret;
 
-       if (!locked)
-               mutex_lock(&vdev->cf_mutex);
-
        vdev->features_valid = true;
        ret = ops->set_driver_features(vdev, features);
-       if (!locked)
-               mutex_unlock(&vdev->cf_mutex);
+
+       return ret;
+}
+
+static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
+{
+       int ret;
+
+       mutex_lock(&vdev->cf_mutex);
+       ret = vdpa_set_features_unlocked(vdev, features);
+       mutex_unlock(&vdev->cf_mutex);
 
        return ret;
 }
index 72292a6..5464f39 100644 (file)
@@ -133,7 +133,6 @@ bool is_virtio_device(struct device *dev);
 void virtio_break_device(struct virtio_device *dev);
 
 void virtio_config_changed(struct virtio_device *dev);
-int virtio_finalize_features(struct virtio_device *dev);
 #ifdef CONFIG_PM_SLEEP
 int virtio_device_freeze(struct virtio_device *dev);
 int virtio_device_restore(struct virtio_device *dev);
index 4d107ad..dafdc7f 100644 (file)
@@ -64,8 +64,9 @@ struct virtio_shm_region {
  *     Returns the first 64 feature bits (all we currently need).
  * @finalize_features: confirm what device features we'll be using.
  *     vdev: the virtio_device
- *     This gives the final feature bits for the device: it can change
+ *     This sends the driver feature bits to the device: it can change
  *     the dev->feature bits if it wants.
+ * Note: despite the name this can be called any number of times.
  *     Returns 0 on success or error status
  * @bus_name: return the bus name associated with the device (optional)
  *     vdev: the virtio_device
index e7ce719..59940e2 100644 (file)
@@ -109,8 +109,6 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
                       const struct in6_addr *daddr, unsigned int srcprefs,
                       struct in6_addr *saddr);
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
-                     u32 banned_flags);
 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
                    u32 banned_flags);
 bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
index 4b3d0b1..a647e5f 100644 (file)
@@ -506,8 +506,7 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
 
                tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
                if (IS_ERR(tmp)) {
-                       kfree_skb(skb);
-                       return tmp;
+                       return skb;
                }
 
                len -= tmp->len;
index 586f69d..e336e9c 100644 (file)
@@ -1489,6 +1489,14 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 /* Extended advertising support */
 #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
 
+/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
+ *
+ * C24: Mandatory if the LE Controller supports Connection State and either
+ * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
+ */
+#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
+                                        ext_adv_capable(dev))
+
 /* ----- HCI protocols ----- */
 #define HCI_PROTO_DEFER             0x01
 
index 38785d4..184105d 100644 (file)
@@ -262,7 +262,7 @@ struct ad_system {
 struct ad_bond_info {
        struct ad_system system;        /* 802.3ad system structure */
        struct bond_3ad_stats stats;
-       u32 agg_select_timer;           /* Timer to select aggregator after all adapter's hand shakes */
+       atomic_t agg_select_timer;              /* Timer to select aggregator after all adapter's hand shakes */
        u16 aggregator_identifier;
 };
 
index 5218041..79c67f1 100644 (file)
@@ -22,7 +22,7 @@
 #include <asm/checksum.h>
 
 #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-static inline
+static __always_inline
 __wsum csum_and_copy_from_user (const void __user *src, void *dst,
                                      int len)
 {
@@ -33,7 +33,7 @@ __wsum csum_and_copy_from_user (const void __user *src, void *dst,
 #endif
 
 #ifndef HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user
+static __always_inline __wsum csum_and_copy_to_user
 (const void *src, void __user *dst, int len)
 {
        __wsum sum = csum_partial(src, len, ~0U);
@@ -45,7 +45,7 @@ static __inline__ __wsum csum_and_copy_to_user
 #endif
 
 #ifndef _HAVE_ARCH_CSUM_AND_COPY
-static inline __wsum
+static __always_inline __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len)
 {
        memcpy(dst, src, len);
@@ -54,7 +54,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len)
 #endif
 
 #ifndef HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
 {
        u32 res = (__force u32)csum;
        res += (__force u32)addend;
@@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
 }
 #endif
 
-static inline __wsum csum_sub(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
 {
        return csum_add(csum, ~addend);
 }
 
-static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
 {
        u16 res = (__force u16)csum;
 
@@ -75,12 +75,12 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
        return (__force __sum16)(res + (res < (__force u16)addend));
 }
 
-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
 {
        return csum16_add(csum, ~addend);
 }
 
-static inline __wsum csum_shift(__wsum sum, int offset)
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
 {
        /* rotate sum to align it with a 16b boundary */
        if (offset & 1)
@@ -88,42 +88,43 @@ static inline __wsum csum_shift(__wsum sum, int offset)
        return sum;
 }
 
-static inline __wsum
+static __always_inline __wsum
 csum_block_add(__wsum csum, __wsum csum2, int offset)
 {
        return csum_add(csum, csum_shift(csum2, offset));
 }
 
-static inline __wsum
+static __always_inline __wsum
 csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
 {
        return csum_block_add(csum, csum2, offset);
 }
 
-static inline __wsum
+static __always_inline __wsum
 csum_block_sub(__wsum csum, __wsum csum2, int offset)
 {
        return csum_block_add(csum, ~csum2, offset);
 }
 
-static inline __wsum csum_unfold(__sum16 n)
+static __always_inline __wsum csum_unfold(__sum16 n)
 {
        return (__force __wsum)n;
 }
 
-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+static __always_inline
+__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
 {
        return csum_partial(buff, len, sum);
 }
 
 #define CSUM_MANGLED_0 ((__force __sum16)0xffff)
 
-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
 {
        *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
 }
 
-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
 {
        __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
 
@@ -136,11 +137,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
  *  m : old value of a 16bit field
  *  m' : new value of a 16bit field
  */
-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
+static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
 {
        *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
 }
 
+static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
+{
+       *csum = csum_add(csum_sub(*csum, old), new);
+}
+
 struct sk_buff;
 void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
                              __be32 from, __be32 to, bool pseudohdr);
@@ -150,16 +156,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
                                     __wsum diff, bool pseudohdr);
 
-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
-                                           __be16 from, __be16 to,
-                                           bool pseudohdr)
+static __always_inline
+void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+                             __be16 from, __be16 to, bool pseudohdr)
 {
        inet_proto_csum_replace4(sum, skb, (__force __be32)from,
                                 (__force __be32)to, pseudohdr);
 }
 
-static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
-                                   int start, int offset)
+static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+                                            int start, int offset)
 {
        __sum16 *psum = (__sum16 *)(ptr + offset);
        __wsum delta;
@@ -175,12 +181,12 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
        return delta;
 }
 
-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
 {
        *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
 }
 
-static inline __wsum wsum_negate(__wsum val)
+static __always_inline __wsum wsum_negate(__wsum val)
 {
        return (__force __wsum)-((__force u32)val);
 }
index 57b3e4e..85a5ba3 100644 (file)
@@ -1187,6 +1187,7 @@ void dsa_unregister_switch(struct dsa_switch *ds);
 int dsa_register_switch(struct dsa_switch *ds);
 void dsa_switch_shutdown(struct dsa_switch *ds);
 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
+void dsa_flush_workqueue(void);
 #ifdef CONFIG_PM_SLEEP
 int dsa_switch_suspend(struct dsa_switch *ds);
 int dsa_switch_resume(struct dsa_switch *ds);
index 14efa0d..adab27b 100644 (file)
@@ -123,8 +123,20 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
 
        memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
               sizeof(struct ip_tunnel_info) + md_size);
+#ifdef CONFIG_DST_CACHE
+       /* Unclone the dst cache if there is one */
+       if (new_md->u.tun_info.dst_cache.cache) {
+               int ret;
+
+               ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
+               if (ret) {
+                       metadata_dst_free(new_md);
+                       return ERR_PTR(ret);
+               }
+       }
+#endif
+
        skb_dst_drop(skb);
-       dst_hold(&new_md->dst);
        skb_dst_set(skb, &new_md->dst);
        return new_md;
 }
index 40ae8f1..2048bc8 100644 (file)
@@ -190,14 +190,16 @@ struct fib6_info {
        u32                             fib6_metric;
        u8                              fib6_protocol;
        u8                              fib6_type;
+
+       u8                              offload;
+       u8                              trap;
+       u8                              offload_failed;
+
        u8                              should_flush:1,
                                        dst_nocount:1,
                                        dst_nopolicy:1,
                                        fib6_destroying:1,
-                                       offload:1,
-                                       trap:1,
-                                       offload_failed:1,
-                                       unused:1;
+                                       unused:4;
 
        struct rcu_head                 rcu;
        struct nexthop                  *nh;
index 3afcb12..92eec13 100644 (file)
@@ -393,17 +393,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt)
                kfree_rcu(opt, rcu);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
 
 extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
 static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
                                                    __be32 label)
 {
-       if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
+       if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
+           READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
                return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
 
        return NULL;
 }
+#endif
 
 struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
                                         struct ip6_flowlabel *fl,
index 53cb8de..47ffb36 100644 (file)
@@ -475,9 +475,9 @@ int igmp6_late_init(void);
 void igmp6_cleanup(void);
 void igmp6_late_cleanup(void);
 
-int igmp6_event_query(struct sk_buff *skb);
+void igmp6_event_query(struct sk_buff *skb);
 
-int igmp6_event_report(struct sk_buff *skb);
+void igmp6_event_report(struct sk_buff *skb);
 
 
 #ifdef CONFIG_SYSCTL
index a3647fa..bd59e95 100644 (file)
@@ -96,6 +96,7 @@ enum flow_offload_xmit_type {
        FLOW_OFFLOAD_XMIT_NEIGH,
        FLOW_OFFLOAD_XMIT_XFRM,
        FLOW_OFFLOAD_XMIT_DIRECT,
+       FLOW_OFFLOAD_XMIT_TC,
 };
 
 #define NF_FLOW_TABLE_ENCAP_MAX                2
@@ -127,7 +128,7 @@ struct flow_offload_tuple {
        struct { }                      __hash;
 
        u8                              dir:2,
-                                       xmit_type:2,
+                                       xmit_type:3,
                                        encap_num:2,
                                        in_vlan_ingress:2;
        u16                             mtu;
@@ -142,6 +143,9 @@ struct flow_offload_tuple {
                        u8              h_source[ETH_ALEN];
                        u8              h_dest[ETH_ALEN];
                } out;
+               struct {
+                       u32             iifidx;
+               } tc;
        };
 };
 
index 9eed51e..980daa6 100644 (file)
@@ -37,7 +37,7 @@ void nf_register_queue_handler(const struct nf_queue_handler *qh);
 void nf_unregister_queue_handler(void);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
-void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
 void nf_queue_entry_free(struct nf_queue_entry *entry);
 
 static inline void init_hashrandom(u32 *jhash_initval)
index eaf55da..c4c0861 100644 (file)
@@ -905,9 +905,9 @@ struct nft_expr_ops {
        int                             (*offload)(struct nft_offload_ctx *ctx,
                                                   struct nft_flow_rule *flow,
                                                   const struct nft_expr *expr);
+       bool                            (*offload_action)(const struct nft_expr *expr);
        void                            (*offload_stats)(struct nft_expr *expr,
                                                         const struct flow_stats *stats);
-       u32                             offload_flags;
        const struct nft_expr_type      *type;
        void                            *data;
 };
index f9d95ff..7971478 100644 (file)
@@ -67,8 +67,6 @@ struct nft_flow_rule {
        struct flow_rule        *rule;
 };
 
-#define NFT_OFFLOAD_F_ACTION   (1 << 0)
-
 void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
                                 enum flow_dissector_key_id addr_type);
 
index a4b5503..6bd7e5a 100644 (file)
@@ -77,9 +77,10 @@ struct netns_ipv6 {
        spinlock_t              fib6_gc_lock;
        unsigned int             ip6_rt_gc_expire;
        unsigned long            ip6_rt_last_gc;
+       unsigned char           flowlabel_has_excl;
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-       unsigned int            fib6_rules_require_fldissect;
        bool                    fib6_has_custom_rules;
+       unsigned int            fib6_rules_require_fldissect;
 #ifdef CONFIG_IPV6_SUBTREES
        unsigned int            fib6_routes_require_src;
 #endif
index ff9b508..50aecd2 100644 (file)
@@ -507,7 +507,7 @@ struct sock {
 #endif
        u16                     sk_tsflags;
        u8                      sk_shutdown;
-       u32                     sk_tskey;
+       atomic_t                sk_tskey;
        atomic_t                sk_zckey;
 
        u8                      sk_clockid;
@@ -2667,7 +2667,7 @@ static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
                __sock_tx_timestamp(tsflags, tx_flags);
                if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
                    tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
-                       *tskey = sk->sk_tskey++;
+                       *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
        }
        if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
                *tx_flags |= SKBTX_WIFI_STATUS;
index fdb41e8..76aa6f1 100644 (file)
@@ -1568,7 +1568,6 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
 int xfrm_init_replay(struct xfrm_state *x);
-u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
 int xfrm_init_state(struct xfrm_state *x);
@@ -1681,14 +1680,15 @@ int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
               const struct xfrm_migrate *m, int num_bundles,
               const struct xfrm_kmaddress *k,
               const struct xfrm_encap_tmpl *encap);
-struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
+                                               u32 if_id);
 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
                                      struct xfrm_migrate *m,
                                      struct xfrm_encap_tmpl *encap);
 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
                 struct xfrm_migrate *m, int num_bundles,
                 struct xfrm_kmaddress *k, struct net *net,
-                struct xfrm_encap_tmpl *encap);
+                struct xfrm_encap_tmpl *encap, u32 if_id);
 #endif
 
 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
index 90ae8d1..bae490c 100644 (file)
@@ -7,7 +7,8 @@
 #ifndef __FSL_DPAA2_FD_H
 #define __FSL_DPAA2_FD_H
 
-#include <linux/kernel.h>
+#include <linux/byteorder/generic.h>
+#include <linux/types.h>
 
 /**
  * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
index 7614fee..edd601f 100644 (file)
@@ -13,7 +13,8 @@
 #define _ASM_POWERPC_IMMAP_QE_H
 #ifdef __KERNEL__
 
-#include <linux/kernel.h>
+#include <linux/types.h>
+
 #include <asm/io.h>
 
 #define QE_IMMAP_SIZE  (1024 * 1024)   /* 1MB from 1MB+IMMR */
index b6febe2..43ea830 100644 (file)
@@ -10,8 +10,8 @@
 #ifndef _QE_TDM_H_
 #define _QE_TDM_H_
 
-#include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/types.h>
 
 #include <soc/fsl/qe/immap_qe.h>
 #include <soc/fsl/qe/qe.h>
@@ -19,6 +19,8 @@
 #include <soc/fsl/qe/ucc.h>
 #include <soc/fsl/qe/ucc_fast.h>
 
+struct device_node;
+
 /* SI RAM entries */
 #define SIR_LAST       0x0001
 #define SIR_BYTE       0x0002
index 9696a5b..ad60b87 100644 (file)
@@ -10,7 +10,7 @@
 #ifndef __UCC_FAST_H__
 #define __UCC_FAST_H__
 
-#include <linux/kernel.h>
+#include <linux/types.h>
 
 #include <soc/fsl/qe/immap_qe.h>
 #include <soc/fsl/qe/qe.h>
index 11a216e..7548ce8 100644 (file)
@@ -11,7 +11,7 @@
 #ifndef __UCC_SLOW_H__
 #define __UCC_SLOW_H__
 
-#include <linux/kernel.h>
+#include <linux/types.h>
 
 #include <soc/fsl/qe/immap_qe.h>
 #include <soc/fsl/qe/qe.h>
index 225ec87..7989d94 100644 (file)
 #define KEY_PAUSECD            201
 #define KEY_PROG3              202
 #define KEY_PROG4              203
-#define KEY_DASHBOARD          204     /* AL Dashboard */
+#define KEY_ALL_APPLICATIONS   204     /* AC Desktop Show All Applications */
+#define KEY_DASHBOARD          KEY_ALL_APPLICATIONS
 #define KEY_SUSPEND            205
 #define KEY_CLOSE              206     /* AC Close */
 #define KEY_PLAY               207
 #define KEY_ASSISTANT          0x247   /* AL Context-aware desktop assistant */
 #define KEY_KBD_LAYOUT_NEXT    0x248   /* AC Next Keyboard Layout Select */
 #define KEY_EMOJI_PICKER       0x249   /* Show/hide emoji picker (HUTRR101) */
+#define KEY_DICTATE            0x24a   /* Start or Stop Voice Dictation Session (HUTRR99) */
 
 #define KEY_BRIGHTNESS_MIN             0x250   /* Set Brightness to Minimum */
 #define KEY_BRIGHTNESS_MAX             0x251   /* Set Brightness to Maximum */
index 5191b57..507ee1f 100644 (file)
@@ -1134,6 +1134,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_VM_GPA_BITS 207
 #define KVM_CAP_XSAVE2 208
 #define KVM_CAP_SYS_ATTRIBUTES 209
+#define KVM_CAP_PPC_AIL_MODE_3 210
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 4b33950..2607102 100644 (file)
@@ -106,7 +106,7 @@ enum ip_conntrack_status {
        IPS_NAT_CLASH = IPS_UNTRACKED,
 #endif
 
-       /* Conntrack got a helper explicitly attached via CT target. */
+       /* Conntrack got a helper explicitly attached (ruleset, ctnetlink). */
        IPS_HELPER_BIT = 13,
        IPS_HELPER = (1 << IPS_HELPER_BIT),
 
index 4e29d78..65e13a0 100644 (file)
@@ -511,6 +511,12 @@ struct xfrm_user_offload {
        int                             ifindex;
        __u8                            flags;
 };
+/* This flag was exposed without any kernel code that supporting it.
+ * Unfortunately, strongswan has the code that uses sets this flag,
+ * which makes impossible to reuse this bit.
+ *
+ * So leave it here to make sure that it won't be reused by mistake.
+ */
 #define XFRM_OFFLOAD_IPV6      1
 #define XFRM_OFFLOAD_INBOUND   2
 
index fce5d43..a83928c 100644 (file)
@@ -185,7 +185,7 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
        case AUDITSC_EXECVE:
                return mask & AUDIT_PERM_EXEC;
        case AUDITSC_OPENAT2:
-               return mask & ACC_MODE((u32)((struct open_how *)ctx->argv[2])->flags);
+               return mask & ACC_MODE((u32)ctx->openat2.flags);
        default:
                return 0;
        }
index e16dafe..3e23b3f 100644 (file)
@@ -5688,7 +5688,8 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
                        }
                        if (check_ptr_off_reg(env, reg, regno))
                                return -EINVAL;
-               } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID || reg2btf_ids[reg->type])) {
+               } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
+                          (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
                        const struct btf_type *reg_ref_t;
                        const struct btf *reg_btf;
                        const char *reg_ref_tname;
@@ -5706,7 +5707,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
                                reg_ref_id = reg->btf_id;
                        } else {
                                reg_btf = btf_vmlinux;
-                               reg_ref_id = *reg2btf_ids[reg->type];
+                               reg_ref_id = *reg2btf_ids[base_type(reg->type)];
                        }
 
                        reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
index 01cfdf4..55c0842 100644 (file)
@@ -2,6 +2,7 @@
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  */
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/bpf-cgroup.h>
 #include <linux/rcupdate.h>
 #include <linux/random.h>
@@ -1075,6 +1076,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
        void *key;
        u32 idx;
 
+       BTF_TYPE_EMIT(struct bpf_timer);
        callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
        if (!callback_fn)
                goto out;
index fa4505f..ca70fe6 100644 (file)
@@ -1355,6 +1355,7 @@ int generic_map_delete_batch(struct bpf_map *map,
                maybe_wait_bpf_programs(map);
                if (err)
                        break;
+               cond_resched();
        }
        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
                err = -EFAULT;
@@ -1412,6 +1413,7 @@ int generic_map_update_batch(struct bpf_map *map,
 
                if (err)
                        break;
+               cond_resched();
        }
 
        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
@@ -1509,6 +1511,7 @@ int generic_map_lookup_batch(struct bpf_map *map,
                swap(prev_key, key);
                retry = MAP_LOOKUP_RETRIES;
                cp++;
+               cond_resched();
        }
 
        if (err == -EFAULT)
index 0e877db..afc6c0e 100644 (file)
@@ -546,6 +546,7 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
                                          char *buf, size_t nbytes, loff_t off)
 {
        struct cgroup *cgrp;
+       struct cgroup_file_ctx *ctx;
 
        BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
 
@@ -553,8 +554,9 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
         * Release agent gets called with all capabilities,
         * require capabilities to set release agent.
         */
-       if ((of->file->f_cred->user_ns != &init_user_ns) ||
-           !capable(CAP_SYS_ADMIN))
+       ctx = of->priv;
+       if ((ctx->ns->user_ns != &init_user_ns) ||
+           !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
                return -EPERM;
 
        cgrp = cgroup_kn_lock_live(of->kn, false);
index 9d05c3c..a557eea 100644 (file)
@@ -6166,6 +6166,20 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
        if (ret)
                goto err;
 
+       /*
+        * Spawning a task directly into a cgroup works by passing a file
+        * descriptor to the target cgroup directory. This can even be an O_PATH
+        * file descriptor. But it can never be a cgroup.procs file descriptor.
+        * This was done on purpose so spawning into a cgroup could be
+        * conceptualized as an atomic
+        *
+        *   fd = openat(dfd_cgroup, "cgroup.procs", ...);
+        *   write(fd, <child-pid>, ...);
+        *
+        * sequence, i.e. it's a shorthand for the caller opening and writing
+        * cgroup.procs of the cgroup indicated by @dfd_cgroup. This allows us
+        * to always use the caller's credentials.
+        */
        ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
                                        !(kargs->flags & CLONE_THREAD),
                                        current->nsproxy->cgroup_ns);
index 4c7254e..5de1844 100644 (file)
@@ -2289,6 +2289,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
        cgroup_taskset_first(tset, &css);
        cs = css_cs(css);
 
+       cpus_read_lock();
        percpu_down_write(&cpuset_rwsem);
 
        guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@@ -2342,6 +2343,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
                wake_up(&cpuset_attach_wq);
 
        percpu_up_write(&cpuset_rwsem);
+       cpus_read_unlock();
 }
 
 /* The various types of files and directories in a cpuset file system */
@@ -3522,8 +3524,8 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
        return cs;
 }
 
-/**
- * cpuset_node_allowed - Can we allocate on a memory node?
+/*
+ * __cpuset_node_allowed - Can we allocate on a memory node?
  * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
@@ -3694,8 +3696,8 @@ void cpuset_print_current_mems_allowed(void)
 
 int cpuset_memory_pressure_enabled __read_mostly;
 
-/**
- * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
+/*
+ * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
  *
  * Keep a running average of the rate of synchronous (direct)
  * page reclaim efforts initiated by tasks in each cpuset.
@@ -3710,7 +3712,7 @@ int cpuset_memory_pressure_enabled __read_mostly;
  * "memory_pressure".  Value displayed is an integer
  * representing the recent rate of entry into the synchronous
  * (direct) page reclaim by any task attached to the cpuset.
- **/
+ */
 
 void __cpuset_memory_pressure_bump(void)
 {
index e9ffb0c..07df6d9 100644 (file)
@@ -16,7 +16,7 @@ CONFIG_SYMBOLIC_ERRNAME=y
 #
 # Compile-time checks and compiler options
 #
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_FRAME_WARN=2048
 CONFIG_SECTION_MISMATCH_WARN_ONLY=y
index 473d17c..933155c 100644 (file)
@@ -665,21 +665,16 @@ EXPORT_SYMBOL(cred_fscmp);
 
 int set_cred_ucounts(struct cred *new)
 {
-       struct task_struct *task = current;
-       const struct cred *old = task->real_cred;
        struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
 
-       if (new->user == old->user && new->user_ns == old->user_ns)
-               return 0;
-
        /*
         * This optimization is needed because alloc_ucounts() uses locks
         * for table lookups.
         */
-       if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
+       if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->uid))
                return 0;
 
-       if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
+       if (!(new_ucounts = alloc_ucounts(new->user_ns, new->uid)))
                return -EAGAIN;
 
        new->ucounts = new_ucounts;
index f1e7ea1..6db1c47 100644 (file)
@@ -627,9 +627,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
        for (i = 0; i < nr_slots(alloc_size + offset); i++)
                mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
        tlb_addr = slot_addr(mem->start, index) + offset;
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-           (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-               swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
+       /*
+        * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
+        * to the tlb buffer, if we knew for sure the device will
+        * overwirte the entire current content. But we don't. Thus
+        * unconditional bounce may prevent leaking swiotlb content (i.e.
+        * kernel memory) to user-space.
+        */
+       swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
        return tlb_addr;
 }
 
@@ -696,10 +701,13 @@ void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
 void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir)
 {
-       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-               swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
-       else
-               BUG_ON(dir != DMA_FROM_DEVICE);
+       /*
+        * Unconditional bounce is necessary to avoid corruption on
+        * sync_*_for_cpu or dma_ummap_* when the device didn't overwrite
+        * the whole lengt of the bounce buffer.
+        */
+       swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
+       BUG_ON(!valid_dma_direction(dir));
 }
 
 void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
index 57c7197..6859229 100644 (file)
@@ -839,7 +839,7 @@ static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
  */
 static void perf_cgroup_switch(struct task_struct *task, int mode)
 {
-       struct perf_cpu_context *cpuctx;
+       struct perf_cpu_context *cpuctx, *tmp;
        struct list_head *list;
        unsigned long flags;
 
@@ -850,7 +850,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
        local_irq_save(flags);
 
        list = this_cpu_ptr(&cgrp_cpuctx_list);
-       list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
+       list_for_each_entry_safe(cpuctx, tmp, list, cgrp_cpuctx_entry) {
                WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
 
                perf_ctx_lock(cpuctx, cpuctx->task_ctx);
index d75a528..f1e8900 100644 (file)
@@ -366,14 +366,14 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
                *new = data_race(*orig);
                INIT_LIST_HEAD(&new->anon_vma_chain);
                new->vm_next = new->vm_prev = NULL;
-               dup_vma_anon_name(orig, new);
+               dup_anon_vma_name(orig, new);
        }
        return new;
 }
 
 void vm_area_free(struct vm_area_struct *vma)
 {
-       free_vma_anon_name(vma);
+       free_anon_vma_name(vma);
        kmem_cache_free(vm_area_cachep, vma);
 }
 
@@ -2021,18 +2021,18 @@ static __latent_entropy struct task_struct *copy_process(
 #ifdef CONFIG_PROVE_LOCKING
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
+       retval = copy_creds(p, clone_flags);
+       if (retval < 0)
+               goto bad_fork_free;
+
        retval = -EAGAIN;
        if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
                if (p->real_cred->user != INIT_USER &&
                    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
-                       goto bad_fork_free;
+                       goto bad_fork_cleanup_count;
        }
        current->flags &= ~PF_NPROC_EXCEEDED;
 
-       retval = copy_creds(p, clone_flags);
-       if (retval < 0)
-               goto bad_fork_free;
-
        /*
         * If multiple threads are within copy_process(), then this check
         * triggers too late. This doesn't hurt, the check is only there
@@ -2266,6 +2266,17 @@ static __latent_entropy struct task_struct *copy_process(
        if (retval)
                goto bad_fork_put_pidfd;
 
+       /*
+        * Now that the cgroups are pinned, re-clone the parent cgroup and put
+        * the new task on the correct runqueue. All this *before* the task
+        * becomes visible.
+        *
+        * This isn't part of ->can_fork() because while the re-cloning is
+        * cgroup specific, it unconditionally needs to place the task on a
+        * runqueue.
+        */
+       sched_cgroup_fork(p, args);
+
        /*
         * From this point on we must avoid any synchronous user-space
         * communication until we take the tasklist-lock. In particular, we do
@@ -2323,10 +2334,6 @@ static __latent_entropy struct task_struct *copy_process(
                goto bad_fork_cancel_cgroup;
        }
 
-       /* past the last point of failure */
-       if (pidfile)
-               fd_install(pidfd, pidfile);
-
        init_task_pid_links(p);
        if (likely(p->pid)) {
                ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -2375,8 +2382,11 @@ static __latent_entropy struct task_struct *copy_process(
        syscall_tracepoint_update(p);
        write_unlock_irq(&tasklist_lock);
 
+       if (pidfile)
+               fd_install(pidfd, pidfile);
+
        proc_fork_connector(p);
-       sched_post_fork(p, args);
+       sched_post_fork(p);
        cgroup_post_fork(p, args);
        perf_event_fork(p);
 
index 4a882f8..f8a0212 100644 (file)
@@ -3462,7 +3462,7 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
        u16 chain_hlock = chain_hlocks[chain->base + i];
        unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
 
-       return lock_classes + class_idx - 1;
+       return lock_classes + class_idx;
 }
 
 /*
@@ -3530,7 +3530,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
                hlock_id = chain_hlocks[chain->base + i];
                chain_key = print_chain_key_iteration(hlock_id, chain_key);
 
-               print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1);
+               print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
                printk("\n");
        }
 }
index b01c69c..ffef98a 100644 (file)
@@ -250,6 +250,7 @@ void module_decompress_cleanup(struct load_info *info)
        info->max_pages = info->used_pages = 0;
 }
 
+#ifdef CONFIG_SYSFS
 static ssize_t compression_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
 {
@@ -269,3 +270,4 @@ static int __init module_decompress_sysfs_init(void)
        return 0;
 }
 late_initcall(module_decompress_sysfs_init);
+#endif
index 44169f3..7e64607 100644 (file)
@@ -504,7 +504,10 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        char *buf)
 {
-       return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
+       if (!pm_wakeup_irq())
+               return -ENODATA;
+
+       return sprintf(buf, "%u\n", pm_wakeup_irq());
 }
 
 power_attr_ro(pm_wakeup_irq);
index b7e7798..11b570f 100644 (file)
@@ -134,7 +134,7 @@ int freeze_processes(void)
        if (!pm_freezing)
                atomic_inc(&system_freezing_cnt);
 
-       pm_wakeup_clear(true);
+       pm_wakeup_clear(0);
        pr_info("Freezing user space processes ... ");
        pm_freezing = true;
        error = try_to_freeze_tasks(true);
index 80cc1f0..6fcdee7 100644 (file)
@@ -136,8 +136,6 @@ static void s2idle_loop(void)
                        break;
                }
 
-               pm_wakeup_clear(false);
-
                s2idle_enter();
        }
 
index 848eaa0..9745613 100644 (file)
@@ -4424,6 +4424,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 
        init_entity_runnable_average(&p->se);
 
+
 #ifdef CONFIG_SCHED_INFO
        if (likely(sched_info_on()))
                memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -4439,18 +4440,23 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        return 0;
 }
 
-void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 {
        unsigned long flags;
-#ifdef CONFIG_CGROUP_SCHED
-       struct task_group *tg;
-#endif
 
+       /*
+        * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+        * required yet, but lockdep gets upset if rules are violated.
+        */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
 #ifdef CONFIG_CGROUP_SCHED
-       tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
-                         struct task_group, css);
-       p->sched_task_group = autogroup_task_group(p, tg);
+       if (1) {
+               struct task_group *tg;
+               tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
+                                 struct task_group, css);
+               tg = autogroup_task_group(p, tg);
+               p->sched_task_group = tg;
+       }
 #endif
        rseq_migrate(p);
        /*
@@ -4461,7 +4467,10 @@ void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
        if (p->sched_class->task_fork)
                p->sched_class->task_fork(p);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
 
+void sched_post_fork(struct task_struct *p)
+{
        uclamp_post_fork(p);
 }
 
index 4d8f44a..db10e73 100644 (file)
@@ -29,6 +29,9 @@
 #include <linux/syscalls.h>
 #include <linux/sysctl.h>
 
+/* Not exposed in headers: strictly internal use only. */
+#define SECCOMP_MODE_DEAD      (SECCOMP_MODE_FILTER + 1)
+
 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
 #include <asm/syscall.h>
 #endif
@@ -1010,6 +1013,7 @@ static void __secure_computing_strict(int this_syscall)
 #ifdef SECCOMP_DEBUG
        dump_stack();
 #endif
+       current->seccomp.mode = SECCOMP_MODE_DEAD;
        seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
        do_exit(SIGKILL);
 }
@@ -1261,6 +1265,7 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
        case SECCOMP_RET_KILL_THREAD:
        case SECCOMP_RET_KILL_PROCESS:
        default:
+               current->seccomp.mode = SECCOMP_MODE_DEAD;
                seccomp_log(this_syscall, SIGSYS, action, true);
                /* Dump core only if this is the last remaining thread. */
                if (action != SECCOMP_RET_KILL_THREAD ||
@@ -1309,6 +1314,11 @@ int __secure_computing(const struct seccomp_data *sd)
                return 0;
        case SECCOMP_MODE_FILTER:
                return __seccomp_filter(this_syscall, sd, false);
+       /* Surviving SECCOMP_RET_KILL_* must be proactively impossible. */
+       case SECCOMP_MODE_DEAD:
+               WARN_ON_ONCE(1);
+               do_exit(SIGKILL);
+               return -1;
        default:
                BUG();
        }
index 3860273..9b04631 100644 (file)
@@ -1342,9 +1342,10 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
        }
        /*
         * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
-        * debugging to leave init killable.
+        * debugging to leave init killable. But HANDLER_EXIT is always fatal.
         */
-       if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
+       if (action->sa.sa_handler == SIG_DFL &&
+           (!t->ptrace || (handler == HANDLER_EXIT)))
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = send_signal(sig, info, t, PIDTYPE_PID);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
index ecc4cf0..5b0e172 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/export.h>
 #include <linux/mm.h>
+#include <linux/mm_inline.h>
 #include <linux/utsname.h>
 #include <linux/mman.h>
 #include <linux/reboot.h>
@@ -472,6 +473,16 @@ static int set_user(struct cred *new)
        if (!new_user)
                return -EAGAIN;
 
+       free_uid(new->user);
+       new->user = new_user;
+       return 0;
+}
+
+static void flag_nproc_exceeded(struct cred *new)
+{
+       if (new->ucounts == current_ucounts())
+               return;
+
        /*
         * We don't fail in case of NPROC limit excess here because too many
         * poorly written programs don't check set*uid() return code, assuming
@@ -480,15 +491,10 @@ static int set_user(struct cred *new)
         * failure to the execve() stage.
         */
        if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
-                       new_user != INIT_USER &&
-                       !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
+                       new->user != INIT_USER)
                current->flags |= PF_NPROC_EXCEEDED;
        else
                current->flags &= ~PF_NPROC_EXCEEDED;
-
-       free_uid(new->user);
-       new->user = new_user;
-       return 0;
 }
 
 /*
@@ -563,6 +569,7 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
        if (retval < 0)
                goto error;
 
+       flag_nproc_exceeded(new);
        return commit_creds(new);
 
 error:
@@ -625,6 +632,7 @@ long __sys_setuid(uid_t uid)
        if (retval < 0)
                goto error;
 
+       flag_nproc_exceeded(new);
        return commit_creds(new);
 
 error:
@@ -704,6 +712,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
        if (retval < 0)
                goto error;
 
+       flag_nproc_exceeded(new);
        return commit_creds(new);
 
 error:
@@ -2278,15 +2287,16 @@ static int prctl_set_vma(unsigned long opt, unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        const char __user *uname;
-       char *name, *pch;
+       struct anon_vma_name *anon_name = NULL;
        int error;
 
        switch (opt) {
        case PR_SET_VMA_ANON_NAME:
                uname = (const char __user *)arg;
                if (uname) {
-                       name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
+                       char *name, *pch;
 
+                       name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
                        if (IS_ERR(name))
                                return PTR_ERR(name);
 
@@ -2296,15 +2306,18 @@ static int prctl_set_vma(unsigned long opt, unsigned long addr,
                                        return -EINVAL;
                                }
                        }
-               } else {
-                       /* Reset the name */
-                       name = NULL;
+                       /* anon_vma has its own copy */
+                       anon_name = anon_vma_name_alloc(name);
+                       kfree(name);
+                       if (!anon_name)
+                               return -ENOMEM;
+
                }
 
                mmap_write_lock(mm);
-               error = madvise_set_anon_name(mm, addr, size, name);
+               error = madvise_set_anon_name(mm, addr, size, anon_name);
                mmap_write_unlock(mm);
-               kfree(name);
+               anon_vma_name_put(anon_name);
                break;
        default:
                error = -EINVAL;
index 5ae443b..730ab56 100644 (file)
@@ -180,6 +180,10 @@ static int bpf_stats_handler(struct ctl_table *table, int write,
        return ret;
 }
 
+void __weak unpriv_ebpf_notify(int new_state)
+{
+}
+
 static int bpf_unpriv_handler(struct ctl_table *table, int write,
                              void *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -197,6 +201,9 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write,
                        return -EPERM;
                *(int *)table->data = unpriv_enable;
        }
+
+       unpriv_ebpf_notify(unpriv_enable);
+
        return ret;
 }
 #endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */
index af68a67..21dea90 100644 (file)
@@ -310,10 +310,20 @@ record_it:
        local_irq_restore(flags);
 }
 
-static void blk_trace_free(struct blk_trace *bt)
+static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
 {
        relay_close(bt->rchan);
-       debugfs_remove(bt->dir);
+
+       /*
+        * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created
+        * under 'q->debugfs_dir', thus lookup and remove them.
+        */
+       if (!bt->dir) {
+               debugfs_remove(debugfs_lookup("dropped", q->debugfs_dir));
+               debugfs_remove(debugfs_lookup("msg", q->debugfs_dir));
+       } else {
+               debugfs_remove(bt->dir);
+       }
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
@@ -335,10 +345,10 @@ static void put_probe_ref(void)
        mutex_unlock(&blk_probe_mutex);
 }
 
-static void blk_trace_cleanup(struct blk_trace *bt)
+static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
 {
        synchronize_rcu();
-       blk_trace_free(bt);
+       blk_trace_free(q, bt);
        put_probe_ref();
 }
 
@@ -352,7 +362,7 @@ static int __blk_trace_remove(struct request_queue *q)
                return -EINVAL;
 
        if (bt->trace_state != Blktrace_running)
-               blk_trace_cleanup(bt);
+               blk_trace_cleanup(q, bt);
 
        return 0;
 }
@@ -572,7 +582,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
        ret = 0;
 err:
        if (ret)
-               blk_trace_free(bt);
+               blk_trace_free(q, bt);
        return ret;
 }
 
@@ -1616,7 +1626,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
 
        put_probe_ref();
        synchronize_rcu();
-       blk_trace_free(bt);
+       blk_trace_free(q, bt);
        return 0;
 }
 
@@ -1647,7 +1657,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
        return 0;
 
 free_bt:
-       blk_trace_free(bt);
+       blk_trace_free(q, bt);
        return ret;
 }
 
index f9feb19..a4b462b 100644 (file)
@@ -7191,7 +7191,6 @@ static int __init ftrace_nodyn_init(void)
 core_initcall(ftrace_nodyn_init);
 
 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
-static inline void ftrace_startup_enable(int command) { }
 static inline void ftrace_startup_all(int command) { }
 
 # define ftrace_startup_sysctl()       do { } while (0)
index c860f58..eb44418 100644 (file)
@@ -235,7 +235,7 @@ static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 static int __init set_trace_boot_options(char *str)
 {
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
-       return 0;
+       return 1;
 }
 __setup("trace_options=", set_trace_boot_options);
 
@@ -246,12 +246,16 @@ static int __init set_trace_boot_clock(char *str)
 {
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
-       return 0;
+       return 1;
 }
 __setup("trace_clock=", set_trace_boot_clock);
 
 static int __init set_tracepoint_printk(char *str)
 {
+       /* Ignore the "tp_printk_stop_on_boot" param */
+       if (*str == '_')
+               return 0;
+
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
@@ -1470,10 +1474,12 @@ static int __init set_buf_size(char *str)
        if (!str)
                return 0;
        buf_size = memparse(str, &str);
-       /* nr_entries can not be zero */
-       if (buf_size == 0)
-               return 0;
-       trace_buf_size = buf_size;
+       /*
+        * nr_entries can not be zero and the startup
+        * tests require some buffer space. Therefore
+        * ensure we have at least 4096 bytes of buffer.
+        */
+       trace_buf_size = max(4096UL, buf_size);
        return 1;
 }
 __setup("trace_buf_size=", set_buf_size);
index d038ddb..c5b09c3 100644 (file)
@@ -136,7 +136,6 @@ struct kprobe_trace_entry_head {
 
 struct eprobe_trace_entry_head {
        struct trace_entry      ent;
-       unsigned int            type;
 };
 
 struct kretprobe_trace_entry_head {
index 191db32..541aa13 100644 (file)
@@ -242,7 +242,6 @@ static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
 
 static int eprobe_event_define_fields(struct trace_event_call *event_call)
 {
-       int ret;
        struct eprobe_trace_entry_head field;
        struct trace_probe *tp;
 
@@ -250,8 +249,6 @@ static int eprobe_event_define_fields(struct trace_event_call *event_call)
        if (WARN_ON_ONCE(!tp))
                return -ENOENT;
 
-       DEFINE_FIELD(unsigned int, type, FIELD_STRING_TYPE, 0);
-
        return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
 }
 
@@ -270,7 +267,9 @@ print_eprobe_event(struct trace_iterator *iter, int flags,
        struct trace_event_call *pevent;
        struct trace_event *probed_event;
        struct trace_seq *s = &iter->seq;
+       struct trace_eprobe *ep;
        struct trace_probe *tp;
+       unsigned int type;
 
        field = (struct eprobe_trace_entry_head *)iter->ent;
        tp = trace_probe_primary_from_call(
@@ -278,15 +277,18 @@ print_eprobe_event(struct trace_iterator *iter, int flags,
        if (WARN_ON_ONCE(!tp))
                goto out;
 
+       ep = container_of(tp, struct trace_eprobe, tp);
+       type = ep->event->event.type;
+
        trace_seq_printf(s, "%s: (", trace_probe_name(tp));
 
-       probed_event = ftrace_find_event(field->type);
+       probed_event = ftrace_find_event(type);
        if (probed_event) {
                pevent = container_of(probed_event, struct trace_event_call, event);
                trace_seq_printf(s, "%s.%s", pevent->class->system,
                                 trace_event_name(pevent));
        } else {
-               trace_seq_printf(s, "%u", field->type);
+               trace_seq_printf(s, "%u", type);
        }
 
        trace_seq_putc(s, ')');
@@ -498,10 +500,6 @@ __eprobe_trace_func(struct eprobe_data *edata, void *rec)
                return;
 
        entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
-       if (edata->ep->event)
-               entry->type = edata->ep->event->event.type;
-       else
-               entry->type = 0;
        store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize);
 
        trace_event_buffer_commit(&fbuffer);
index ada87bf..dc7f733 100644 (file)
@@ -2289,9 +2289,9 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
                        /*
                         * For backward compatibility, if field_name
                         * was "cpu", then we treat this the same as
-                        * common_cpu.
+                        * common_cpu. This also works for "CPU".
                         */
-                       if (strcmp(field_name, "cpu") == 0) {
+                       if (field && field->filter_type == FILTER_CPU) {
                                *flags |= HIST_FIELD_FL_CPU;
                        } else {
                                hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
@@ -4832,7 +4832,7 @@ static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
 
                        if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
                                cmp_fn = tracing_map_cmp_none;
-                       else if (!field)
+                       else if (!field || hist_field->flags & HIST_FIELD_FL_CPU)
                                cmp_fn = tracing_map_cmp_num(hist_field->size,
                                                             hist_field->is_signed);
                        else if (is_string_field(field))
index d00fee7..7eb9d04 100644 (file)
@@ -84,6 +84,20 @@ event_triggers_call(struct trace_event_file *file,
 }
 EXPORT_SYMBOL_GPL(event_triggers_call);
 
+bool __trace_trigger_soft_disabled(struct trace_event_file *file)
+{
+       unsigned long eflags = file->flags;
+
+       if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+               event_triggers_call(file, NULL, NULL, NULL);
+       if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+               return true;
+       if (eflags & EVENT_FILE_FL_PID_FILTER)
+               return trace_event_ignore_this_pid(file);
+       return false;
+}
+EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
+
 /**
  * event_triggers_post_call - Call 'post_triggers' for a trace event
  * @file: The trace_event_file associated with the event
@@ -1295,6 +1309,16 @@ traceon_trigger(struct event_trigger_data *data,
                struct trace_buffer *buffer, void *rec,
                struct ring_buffer_event *event)
 {
+       struct trace_event_file *file = data->private_data;
+
+       if (file) {
+               if (tracer_tracing_is_on(file->tr))
+                       return;
+
+               tracer_tracing_on(file->tr);
+               return;
+       }
+
        if (tracing_is_on())
                return;
 
@@ -1306,8 +1330,15 @@ traceon_count_trigger(struct event_trigger_data *data,
                      struct trace_buffer *buffer, void *rec,
                      struct ring_buffer_event *event)
 {
-       if (tracing_is_on())
-               return;
+       struct trace_event_file *file = data->private_data;
+
+       if (file) {
+               if (tracer_tracing_is_on(file->tr))
+                       return;
+       } else {
+               if (tracing_is_on())
+                       return;
+       }
 
        if (!data->count)
                return;
@@ -1315,7 +1346,10 @@ traceon_count_trigger(struct event_trigger_data *data,
        if (data->count != -1)
                (data->count)--;
 
-       tracing_on();
+       if (file)
+               tracer_tracing_on(file->tr);
+       else
+               tracing_on();
 }
 
 static void
@@ -1323,6 +1357,16 @@ traceoff_trigger(struct event_trigger_data *data,
                 struct trace_buffer *buffer, void *rec,
                 struct ring_buffer_event *event)
 {
+       struct trace_event_file *file = data->private_data;
+
+       if (file) {
+               if (!tracer_tracing_is_on(file->tr))
+                       return;
+
+               tracer_tracing_off(file->tr);
+               return;
+       }
+
        if (!tracing_is_on())
                return;
 
@@ -1334,8 +1378,15 @@ traceoff_count_trigger(struct event_trigger_data *data,
                       struct trace_buffer *buffer, void *rec,
                       struct ring_buffer_event *event)
 {
-       if (!tracing_is_on())
-               return;
+       struct trace_event_file *file = data->private_data;
+
+       if (file) {
+               if (!tracer_tracing_is_on(file->tr))
+                       return;
+       } else {
+               if (!tracing_is_on())
+                       return;
+       }
 
        if (!data->count)
                return;
@@ -1343,7 +1394,10 @@ traceoff_count_trigger(struct event_trigger_data *data,
        if (data->count != -1)
                (data->count)--;
 
-       tracing_off();
+       if (file)
+               tracer_tracing_off(file->tr);
+       else
+               tracing_off();
 }
 
 static int
@@ -1540,7 +1594,12 @@ stacktrace_trigger(struct event_trigger_data *data,
                   struct trace_buffer *buffer,  void *rec,
                   struct ring_buffer_event *event)
 {
-       trace_dump_stack(STACK_SKIP);
+       struct trace_event_file *file = data->private_data;
+
+       if (file)
+               __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
+       else
+               trace_dump_stack(STACK_SKIP);
 }
 
 static void
index 508f14a..b62fd78 100644 (file)
@@ -32,7 +32,7 @@ static int __init set_kprobe_boot_events(char *str)
        strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
        disable_tracing_selftest("running kprobe events");
 
-       return 0;
+       return 1;
 }
 __setup("kprobe_event=", set_kprobe_boot_events);
 
index 870a08d..cfddb30 100644 (file)
@@ -1436,6 +1436,37 @@ out:
 static struct cpumask osnoise_cpumask;
 static struct cpumask save_cpumask;
 
+/*
+ * osnoise_sleep - sleep until the next period
+ */
+static void osnoise_sleep(void)
+{
+       u64 interval;
+       ktime_t wake_time;
+
+       mutex_lock(&interface_lock);
+       interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
+       mutex_unlock(&interface_lock);
+
+       /*
+        * differently from hwlat_detector, the osnoise tracer can run
+        * without a pause because preemption is on.
+        */
+       if (!interval) {
+               /* Let synchronize_rcu_tasks() make progress */
+               cond_resched_tasks_rcu_qs();
+               return;
+       }
+
+       wake_time = ktime_add_us(ktime_get(), interval);
+       __set_current_state(TASK_INTERRUPTIBLE);
+
+       while (schedule_hrtimeout_range(&wake_time, 0, HRTIMER_MODE_ABS)) {
+               if (kthread_should_stop())
+                       break;
+       }
+}
+
 /*
  * osnoise_main - The osnoise detection kernel thread
  *
@@ -1444,30 +1475,10 @@ static struct cpumask save_cpumask;
  */
 static int osnoise_main(void *data)
 {
-       u64 interval;
 
        while (!kthread_should_stop()) {
-
                run_osnoise();
-
-               mutex_lock(&interface_lock);
-               interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
-               mutex_unlock(&interface_lock);
-
-               do_div(interval, USEC_PER_MSEC);
-
-               /*
-                * differently from hwlat_detector, the osnoise tracer can run
-                * without a pause because preemption is on.
-                */
-               if (interval < 1) {
-                       /* Let synchronize_rcu_tasks() make progress */
-                       cond_resched_tasks_rcu_qs();
-                       continue;
-               }
-
-               if (msleep_interruptible(interval))
-                       break;
+               osnoise_sleep();
        }
 
        return 0;
index 73d9017..80863c6 100644 (file)
@@ -871,15 +871,15 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
        switch (ptype) {
        case PROBE_PRINT_NORMAL:
                fmt = "(%lx)";
-               arg = "REC->" FIELD_STRING_IP;
+               arg = "REC->" FIELD_STRING_IP;
                break;
        case PROBE_PRINT_RETURN:
                fmt = "(%lx <- %lx)";
-               arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
+               arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
                break;
        case PROBE_PRINT_EVENT:
-               fmt = "(%u)";
-               arg = "REC->" FIELD_STRING_TYPE;
+               fmt = "";
+               arg = "";
                break;
        default:
                WARN_ON_ONCE(1);
@@ -903,7 +903,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
                                        parg->type->fmt);
        }
 
-       pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", arg);
+       pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", arg);
 
        for (i = 0; i < tp->nr_args; i++) {
                parg = tp->args + i;
index 99e7a5d..92cc149 100644 (file)
@@ -38,7 +38,6 @@
 #define FIELD_STRING_IP                "__probe_ip"
 #define FIELD_STRING_RETIP     "__probe_ret_ip"
 #define FIELD_STRING_FUNC      "__probe_func"
-#define FIELD_STRING_TYPE      "__probe_type"
 
 #undef DEFINE_FIELD
 #define DEFINE_FIELD(type, item, name, is_signed)                      \
index afd937a..abcadbe 100644 (file)
@@ -784,9 +784,7 @@ static struct fgraph_ops fgraph_ops __initdata  = {
        .retfunc                = &trace_graph_return,
 };
 
-#if defined(CONFIG_DYNAMIC_FTRACE) && \
-    defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
-#define TEST_DIRECT_TRAMP
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 noinline __noclone static void trace_direct_tramp(void) { }
 #endif
 
@@ -849,7 +847,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
                goto out;
        }
 
-#ifdef TEST_DIRECT_TRAMP
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        tracing_reset_online_cpus(&tr->array_buffer);
        set_graph_array(tr);
 
index 65b5974..06ea04d 100644 (file)
@@ -350,7 +350,8 @@ bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsign
        if (rlimit > LONG_MAX)
                max = LONG_MAX;
        for (iter = ucounts; iter; iter = iter->ns->ucounts) {
-               if (get_ucounts_value(iter, type) > max)
+               long val = get_ucounts_value(iter, type);
+               if (val < 0 || val > max)
                        return true;
                max = READ_ONCE(iter->ns->ucount_max[type]);
        }
index 6b2e3ca..5481ba4 100644 (file)
@@ -58,6 +58,18 @@ static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
        cred->user_ns = user_ns;
 }
 
+static unsigned long enforced_nproc_rlimit(void)
+{
+       unsigned long limit = RLIM_INFINITY;
+
+       /* Is RLIMIT_NPROC currently enforced? */
+       if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) ||
+           (current_user_ns() != &init_user_ns))
+               limit = rlimit(RLIMIT_NPROC);
+
+       return limit;
+}
+
 /*
  * Create a new user namespace, deriving the creator from the user in the
  * passed credentials, and replacing that user with the new root user for the
@@ -122,7 +134,7 @@ int create_user_ns(struct cred *new)
        for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) {
                ns->ucount_max[i] = INT_MAX;
        }
-       set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC));
+       set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_NPROC, enforced_nproc_rlimit());
        set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_MSGQUEUE, rlimit(RLIMIT_MSGQUEUE));
        set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_SIGPENDING, rlimit(RLIMIT_SIGPENDING));
        set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_MEMLOCK, rlimit(RLIMIT_MEMLOCK));
index c80fde8..9b5a692 100644 (file)
@@ -45,7 +45,6 @@ config BITREVERSE
 config HAVE_ARCH_BITREVERSE
        bool
        default n
-       depends on BITREVERSE
        help
          This option enables the use of hardware bit-reversal instructions on
          architectures which support such operations.
index b0e0acd..6dd5330 100644 (file)
@@ -414,6 +414,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
                return 0;
 
        buf->ops = &page_cache_pipe_buf_ops;
+       buf->flags = 0;
        get_page(page);
        buf->page = page;
        buf->offset = offset;
@@ -577,6 +578,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
                        break;
 
                buf->ops = &default_pipe_buf_ops;
+               buf->flags = 0;
                buf->page = page;
                buf->offset = 0;
                buf->len = min_t(ssize_t, left, PAGE_SIZE);
index 26a5c90..3b413f8 100644 (file)
@@ -869,11 +869,14 @@ static void kmem_cache_invalid_free(struct kunit *test)
        kmem_cache_destroy(cache);
 }
 
+static void empty_cache_ctor(void *object) { }
+
 static void kmem_cache_double_destroy(struct kunit *test)
 {
        struct kmem_cache *cache;
 
-       cache = kmem_cache_create("test_cache", 200, 0, 0, NULL);
+       /* Provide a constructor to prevent cache merging. */
+       cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
        kmem_cache_destroy(cache);
        KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
index 61895cc..f294db8 100644 (file)
@@ -4159,10 +4159,10 @@ static int __init hugepages_setup(char *s)
                                pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
                                return 0;
                        }
+                       if (tmp >= nr_online_nodes)
+                               goto invalid;
                        node = tmp;
                        p += count + 1;
-                       if (node < 0 || node >= nr_online_nodes)
-                               goto invalid;
                        /* Parse hugepages */
                        if (sscanf(p, "%lu%n", &tmp, &count) != 1)
                                goto invalid;
@@ -4851,14 +4851,13 @@ again:
 }
 
 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
-                         unsigned long new_addr, pte_t *src_pte)
+                         unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
 {
        struct hstate *h = hstate_vma(vma);
        struct mm_struct *mm = vma->vm_mm;
-       pte_t *dst_pte, pte;
        spinlock_t *src_ptl, *dst_ptl;
+       pte_t pte;
 
-       dst_pte = huge_pte_offset(mm, new_addr, huge_page_size(h));
        dst_ptl = huge_pte_lock(h, mm, dst_pte);
        src_ptl = huge_pte_lockptr(h, mm, src_pte);
 
@@ -4917,7 +4916,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
                if (!dst_pte)
                        break;
 
-               move_huge_pte(vma, old_addr, new_addr, src_pte);
+               move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
        }
        flush_tlb_range(vma, old_end - len, old_end);
        mmu_notifier_invalidate_range_end(&range);
index 5ad40e3..13128fa 100644 (file)
@@ -47,7 +47,8 @@
 
 static bool kfence_enabled __read_mostly;
 
-static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
 
 #ifdef MODULE_PARAM_PREFIX
 #undef MODULE_PARAM_PREFIX
index a22b1af..50dbb81 100644 (file)
@@ -268,13 +268,13 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
         * 100x the sample interval should be more than enough to ensure we get
         * a KFENCE allocation eventually.
         */
-       timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
+       timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
        /*
         * Especially for non-preemption kernels, ensure the allocation-gate
         * timer can catch up: after @resched_after, every failed allocation
         * attempt yields, to ensure the allocation-gate timer is scheduled.
         */
-       resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
+       resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
        do {
                if (test_cache)
                        alloc = kmem_cache_alloc(test_cache, gfp);
@@ -608,7 +608,7 @@ static void test_gfpzero(struct kunit *test)
        int i;
 
        /* Skip if we think it'd take too long. */
-       KFENCE_TEST_REQUIRES(test, CONFIG_KFENCE_SAMPLE_INTERVAL <= 100);
+       KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);
 
        setup_test_cache(test, size, 0, NULL);
        buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
@@ -739,7 +739,7 @@ static void test_memcache_alloc_bulk(struct kunit *test)
         * 100x the sample interval should be more than enough to ensure we get
         * a KFENCE allocation eventually.
         */
-       timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
+       timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
        do {
                void *objects[100];
                int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
index 5604064..38d0f51 100644 (file)
@@ -65,7 +65,7 @@ static int madvise_need_mmap_write(int behavior)
 }
 
 #ifdef CONFIG_ANON_VMA_NAME
-static struct anon_vma_name *anon_vma_name_alloc(const char *name)
+struct anon_vma_name *anon_vma_name_alloc(const char *name)
 {
        struct anon_vma_name *anon_name;
        size_t count;
@@ -81,78 +81,48 @@ static struct anon_vma_name *anon_vma_name_alloc(const char *name)
        return anon_name;
 }
 
-static void vma_anon_name_free(struct kref *kref)
+void anon_vma_name_free(struct kref *kref)
 {
        struct anon_vma_name *anon_name =
                        container_of(kref, struct anon_vma_name, kref);
        kfree(anon_name);
 }
 
-static inline bool has_vma_anon_name(struct vm_area_struct *vma)
+struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
 {
-       return !vma->vm_file && vma->anon_name;
-}
-
-const char *vma_anon_name(struct vm_area_struct *vma)
-{
-       if (!has_vma_anon_name(vma))
-               return NULL;
-
        mmap_assert_locked(vma->vm_mm);
 
-       return vma->anon_name->name;
-}
-
-void dup_vma_anon_name(struct vm_area_struct *orig_vma,
-                      struct vm_area_struct *new_vma)
-{
-       if (!has_vma_anon_name(orig_vma))
-               return;
-
-       kref_get(&orig_vma->anon_name->kref);
-       new_vma->anon_name = orig_vma->anon_name;
-}
-
-void free_vma_anon_name(struct vm_area_struct *vma)
-{
-       struct anon_vma_name *anon_name;
-
-       if (!has_vma_anon_name(vma))
-               return;
+       if (vma->vm_file)
+               return NULL;
 
-       anon_name = vma->anon_name;
-       vma->anon_name = NULL;
-       kref_put(&anon_name->kref, vma_anon_name_free);
+       return vma->anon_name;
 }
 
 /* mmap_lock should be write-locked */
-static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name)
+static int replace_anon_vma_name(struct vm_area_struct *vma,
+                                struct anon_vma_name *anon_name)
 {
-       const char *anon_name;
+       struct anon_vma_name *orig_name = anon_vma_name(vma);
 
-       if (!name) {
-               free_vma_anon_name(vma);
+       if (!anon_name) {
+               vma->anon_name = NULL;
+               anon_vma_name_put(orig_name);
                return 0;
        }
 
-       anon_name = vma_anon_name(vma);
-       if (anon_name) {
-               /* Same name, nothing to do here */
-               if (!strcmp(name, anon_name))
-                       return 0;
+       if (anon_vma_name_eq(orig_name, anon_name))
+               return 0;
 
-               free_vma_anon_name(vma);
-       }
-       vma->anon_name = anon_vma_name_alloc(name);
-       if (!vma->anon_name)
-               return -ENOMEM;
+       vma->anon_name = anon_vma_name_reuse(anon_name);
+       anon_vma_name_put(orig_name);
 
        return 0;
 }
 #else /* CONFIG_ANON_VMA_NAME */
-static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name)
+static int replace_anon_vma_name(struct vm_area_struct *vma,
+                                struct anon_vma_name *anon_name)
 {
-       if (name)
+       if (anon_name)
                return -EINVAL;
 
        return 0;
@@ -161,17 +131,19 @@ static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name)
 /*
  * Update the vm_flags on region of a vma, splitting it or merging it as
  * necessary.  Must be called with mmap_sem held for writing;
+ * Caller should ensure anon_name stability by raising its refcount even when
+ * anon_name belongs to a valid vma because this function might free that vma.
  */
 static int madvise_update_vma(struct vm_area_struct *vma,
                              struct vm_area_struct **prev, unsigned long start,
                              unsigned long end, unsigned long new_flags,
-                             const char *name)
+                             struct anon_vma_name *anon_name)
 {
        struct mm_struct *mm = vma->vm_mm;
        int error;
        pgoff_t pgoff;
 
-       if (new_flags == vma->vm_flags && is_same_vma_anon_name(vma, name)) {
+       if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
                *prev = vma;
                return 0;
        }
@@ -179,7 +151,7 @@ static int madvise_update_vma(struct vm_area_struct *vma,
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma),
-                         vma->vm_userfaultfd_ctx, name);
+                         vma->vm_userfaultfd_ctx, anon_name);
        if (*prev) {
                vma = *prev;
                goto success;
@@ -209,7 +181,7 @@ success:
         */
        vma->vm_flags = new_flags;
        if (!vma->vm_file) {
-               error = replace_vma_anon_name(vma, name);
+               error = replace_anon_vma_name(vma, anon_name);
                if (error)
                        return error;
        }
@@ -975,6 +947,7 @@ static int madvise_vma_behavior(struct vm_area_struct *vma,
                                unsigned long behavior)
 {
        int error;
+       struct anon_vma_name *anon_name;
        unsigned long new_flags = vma->vm_flags;
 
        switch (behavior) {
@@ -1040,8 +1013,11 @@ static int madvise_vma_behavior(struct vm_area_struct *vma,
                break;
        }
 
+       anon_name = anon_vma_name(vma);
+       anon_vma_name_get(anon_name);
        error = madvise_update_vma(vma, prev, start, end, new_flags,
-                                  vma_anon_name(vma));
+                                  anon_name);
+       anon_vma_name_put(anon_name);
 
 out:
        /*
@@ -1225,7 +1201,7 @@ int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
 static int madvise_vma_anon_name(struct vm_area_struct *vma,
                                 struct vm_area_struct **prev,
                                 unsigned long start, unsigned long end,
-                                unsigned long name)
+                                unsigned long anon_name)
 {
        int error;
 
@@ -1234,7 +1210,7 @@ static int madvise_vma_anon_name(struct vm_area_struct *vma,
                return -EBADF;
 
        error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
-                                  (const char *)name);
+                                  (struct anon_vma_name *)anon_name);
 
        /*
         * madvise() returns EAGAIN if kernel resources, such as
@@ -1246,7 +1222,7 @@ static int madvise_vma_anon_name(struct vm_area_struct *vma,
 }
 
 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
-                         unsigned long len_in, const char *name)
+                         unsigned long len_in, struct anon_vma_name *anon_name)
 {
        unsigned long end;
        unsigned long len;
@@ -1266,7 +1242,7 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
        if (end == start)
                return 0;
 
-       return madvise_walk_vmas(mm, start, end, (unsigned long)name,
+       return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
                                 madvise_vma_anon_name);
 }
 #endif /* CONFIG_ANON_VMA_NAME */
index 1018e50..b12a364 100644 (file)
@@ -366,14 +366,20 @@ void __init memblock_discard(void)
                addr = __pa(memblock.reserved.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.reserved.max);
-               memblock_free_late(addr, size);
+               if (memblock_reserved_in_slab)
+                       kfree(memblock.reserved.regions);
+               else
+                       memblock_free_late(addr, size);
        }
 
        if (memblock.memory.regions != memblock_memory_init_regions) {
                addr = __pa(memblock.memory.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.memory.max);
-               memblock_free_late(addr, size);
+               if (memblock_memory_in_slab)
+                       kfree(memblock.memory.regions);
+               else
+                       memblock_free_late(addr, size);
        }
 
        memblock_memory = NULL;
index 09d342c..36e9f38 100644 (file)
@@ -254,7 +254,7 @@ struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-extern spinlock_t css_set_lock;
+static DEFINE_SPINLOCK(objcg_lock);
 
 bool mem_cgroup_kmem_disabled(void)
 {
@@ -298,9 +298,9 @@ static void obj_cgroup_release(struct percpu_ref *ref)
        if (nr_pages)
                obj_cgroup_uncharge_pages(objcg, nr_pages);
 
-       spin_lock_irqsave(&css_set_lock, flags);
+       spin_lock_irqsave(&objcg_lock, flags);
        list_del(&objcg->list);
-       spin_unlock_irqrestore(&css_set_lock, flags);
+       spin_unlock_irqrestore(&objcg_lock, flags);
 
        percpu_ref_exit(ref);
        kfree_rcu(objcg, rcu);
@@ -332,7 +332,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 
        objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
 
-       spin_lock_irq(&css_set_lock);
+       spin_lock_irq(&objcg_lock);
 
        /* 1) Ready to reparent active objcg. */
        list_add(&objcg->list, &memcg->objcg_list);
@@ -342,7 +342,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
        /* 3) Move already reparented objcgs to the parent's list */
        list_splice(&memcg->objcg_list, &parent->objcg_list);
 
-       spin_unlock_irq(&css_set_lock);
+       spin_unlock_irq(&objcg_lock);
 
        percpu_ref_kill(&objcg->refcnt);
 }
index 9f80f16..08f5f83 100644 (file)
 static void memfd_tag_pins(struct xa_state *xas)
 {
        struct page *page;
-       unsigned int tagged = 0;
+       int latency = 0;
+       int cache_count;
 
        lru_add_drain();
 
        xas_lock_irq(xas);
        xas_for_each(xas, page, ULONG_MAX) {
-               if (xa_is_value(page))
-                       continue;
-               page = find_subpage(page, xas->xa_index);
-               if (page_count(page) - page_mapcount(page) > 1)
+               cache_count = 1;
+               if (!xa_is_value(page) &&
+                   PageTransHuge(page) && !PageHuge(page))
+                       cache_count = HPAGE_PMD_NR;
+
+               if (!xa_is_value(page) &&
+                   page_count(page) - total_mapcount(page) != cache_count)
                        xas_set_mark(xas, MEMFD_TAG_PINNED);
+               if (cache_count != 1)
+                       xas_set(xas, page->index + cache_count);
 
-               if (++tagged % XA_CHECK_SCHED)
+               latency += cache_count;
+               if (latency < XA_CHECK_SCHED)
                        continue;
+               latency = 0;
 
                xas_pause(xas);
                xas_unlock_irq(xas);
@@ -73,7 +81,8 @@ static int memfd_wait_for_pins(struct address_space *mapping)
 
        error = 0;
        for (scan = 0; scan <= LAST_SCAN; scan++) {
-               unsigned int tagged = 0;
+               int latency = 0;
+               int cache_count;
 
                if (!xas_marked(&xas, MEMFD_TAG_PINNED))
                        break;
@@ -87,10 +96,14 @@ static int memfd_wait_for_pins(struct address_space *mapping)
                xas_lock_irq(&xas);
                xas_for_each_marked(&xas, page, ULONG_MAX, MEMFD_TAG_PINNED) {
                        bool clear = true;
-                       if (xa_is_value(page))
-                               continue;
-                       page = find_subpage(page, xas.xa_index);
-                       if (page_count(page) - page_mapcount(page) != 1) {
+
+                       cache_count = 1;
+                       if (!xa_is_value(page) &&
+                           PageTransHuge(page) && !PageHuge(page))
+                               cache_count = HPAGE_PMD_NR;
+
+                       if (!xa_is_value(page) && cache_count !=
+                           page_count(page) - total_mapcount(page)) {
                                /*
                                 * On the last scan, we clean up all those tags
                                 * we inserted; but make a note that we still
@@ -103,8 +116,11 @@ static int memfd_wait_for_pins(struct address_space *mapping)
                        }
                        if (clear)
                                xas_clear_mark(&xas, MEMFD_TAG_PINNED);
-                       if (++tagged % XA_CHECK_SCHED)
+
+                       latency += cache_count;
+                       if (latency < XA_CHECK_SCHED)
                                continue;
+                       latency = 0;
 
                        xas_pause(&xas);
                        xas_unlock_irq(&xas);
index 028e8dd..69284d3 100644 (file)
@@ -814,7 +814,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                 vma->anon_vma, vma->vm_file, pgoff,
                                 new_pol, vma->vm_userfaultfd_ctx,
-                                vma_anon_name(vma));
+                                anon_vma_name(vma));
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
index 8f584ed..25934e7 100644 (file)
@@ -512,7 +512,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma),
-                         vma->vm_userfaultfd_ctx, vma_anon_name(vma));
+                         vma->vm_userfaultfd_ctx, anon_vma_name(vma));
        if (*prev) {
                vma = *prev;
                goto success;
index 1e8fdb0..f61a154 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1031,7 +1031,7 @@ again:
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                                struct file *file, unsigned long vm_flags,
                                struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-                               const char *anon_name)
+                               struct anon_vma_name *anon_name)
 {
        /*
         * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -1049,7 +1049,7 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
                return 0;
        if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
                return 0;
-       if (!is_same_vma_anon_name(vma, anon_name))
+       if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
                return 0;
        return 1;
 }
@@ -1084,7 +1084,7 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
                     struct anon_vma *anon_vma, struct file *file,
                     pgoff_t vm_pgoff,
                     struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-                    const char *anon_name)
+                    struct anon_vma_name *anon_name)
 {
        if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
@@ -1106,7 +1106,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
                    struct anon_vma *anon_vma, struct file *file,
                    pgoff_t vm_pgoff,
                    struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-                   const char *anon_name)
+                   struct anon_vma_name *anon_name)
 {
        if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
@@ -1167,7 +1167,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        struct anon_vma *anon_vma, struct file *file,
                        pgoff_t pgoff, struct mempolicy *policy,
                        struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-                       const char *anon_name)
+                       struct anon_vma_name *anon_name)
 {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;
@@ -3186,6 +3186,7 @@ void exit_mmap(struct mm_struct *mm)
                vma = remove_vma(vma);
                cond_resched();
        }
+       mm->mmap = NULL;
        mmap_write_unlock(mm);
        vm_unacct_memory(nr_accounted);
 }
@@ -3255,7 +3256,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                return NULL;    /* should never get here */
        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
                            vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-                           vma->vm_userfaultfd_ctx, vma_anon_name(vma));
+                           vma->vm_userfaultfd_ctx, anon_vma_name(vma));
        if (new_vma) {
                /*
                 * Source vma may have been merged into new_vma
index 0138dfc..2887644 100644 (file)
@@ -94,7 +94,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
                                /* Also skip shared copy-on-write pages */
                                if (is_cow_mapping(vma->vm_flags) &&
-                                   page_mapcount(page) != 1)
+                                   page_count(page) != 1)
                                        continue;
 
                                /*
@@ -464,7 +464,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                           vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-                          vma->vm_userfaultfd_ctx, vma_anon_name(vma));
+                          vma->vm_userfaultfd_ctx, anon_vma_name(vma));
        if (*pprev) {
                vma = *pprev;
                VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
index 7e43369..d310208 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -587,8 +587,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
                return ret;
 
        /* Don't even allow crazy sizes */
-       if (WARN_ON_ONCE(size > INT_MAX))
+       if (unlikely(size > INT_MAX)) {
+               WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
+       }
 
        return __vmalloc_node(size, 1, flags, node,
                        __builtin_return_address(0));
index 090bfb6..59b14e0 100644 (file)
@@ -1066,8 +1066,10 @@ void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
         * forward progress (e.g. journalling workqueues or kthreads).
         */
        if (!current_is_kswapd() &&
-           current->flags & (PF_IO_WORKER|PF_KTHREAD))
+           current->flags & (PF_IO_WORKER|PF_KTHREAD)) {
+               cond_resched();
                return;
+       }
 
        /*
         * These figures are pulled out of thin air.
index 1a705a4..5eaf388 100644 (file)
@@ -129,6 +129,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,
                                   u32 skb_prio, u16 vlan_prio);
 int vlan_dev_set_egress_priority(const struct net_device *dev,
                                 u32 skb_prio, u16 vlan_prio);
+void vlan_dev_free_egress_priority(const struct net_device *dev);
 int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result,
                               size_t size);
@@ -139,7 +140,6 @@ int vlan_check_real_dev(struct net_device *real_dev,
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
-void vlan_dev_uninit(struct net_device *dev);
 bool vlan_dev_inherit_address(struct net_device *dev,
                              struct net_device *real_dev);
 
index 26d031a..d190282 100644 (file)
@@ -622,7 +622,7 @@ static int vlan_dev_init(struct net_device *dev)
 }
 
 /* Note: this function might be called multiple times for the same device. */
-void vlan_dev_uninit(struct net_device *dev)
+void vlan_dev_free_egress_priority(const struct net_device *dev)
 {
        struct vlan_priority_tci_mapping *pm;
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -636,6 +636,16 @@ void vlan_dev_uninit(struct net_device *dev)
        }
 }
 
+static void vlan_dev_uninit(struct net_device *dev)
+{
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+       vlan_dev_free_egress_priority(dev);
+
+       /* Get rid of the vlan's reference to real_dev */
+       dev_put_track(vlan->real_dev, &vlan->dev_tracker);
+}
+
 static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
        netdev_features_t features)
 {
@@ -846,9 +856,6 @@ static void vlan_dev_free(struct net_device *dev)
 
        free_percpu(vlan->vlan_pcpu_stats);
        vlan->vlan_pcpu_stats = NULL;
-
-       /* Get rid of the vlan's reference to real_dev */
-       dev_put_track(vlan->real_dev, &vlan->dev_tracker);
 }
 
 void vlan_setup(struct net_device *dev)
index 0db85ae..53b1955 100644 (file)
@@ -183,10 +183,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
                return -EINVAL;
 
        err = vlan_changelink(dev, tb, data, extack);
-       if (!err)
-               err = register_vlan_dev(dev, extack);
        if (err)
-               vlan_dev_uninit(dev);
+               return err;
+       err = register_vlan_dev(dev, extack);
+       if (err)
+               vlan_dev_free_egress_priority(dev);
        return err;
 }
 
index 3e49d28..d53cbb4 100644 (file)
@@ -91,9 +91,10 @@ again:
                        spin_unlock_bh(&ax25_list_lock);
                        lock_sock(sk);
                        s->ax25_dev = NULL;
+                       dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
                        ax25_dev_put(ax25_dev);
-                       release_sock(sk);
                        ax25_disconnect(s, ENETUNREACH);
+                       release_sock(sk);
                        spin_lock_bh(&ax25_list_lock);
                        sock_put(sk);
                        /* The entry could have been deleted from the
@@ -1116,8 +1117,10 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                }
        }
 
-       if (ax25_dev != NULL)
+       if (ax25_dev) {
                ax25_fillin_cb(ax25, ax25_dev);
+               dev_hold_track(ax25_dev->dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
+       }
 
 done:
        ax25_cb_add(ax25);
index 8a2b78f..35fadb9 100644 (file)
@@ -149,22 +149,25 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
        struct net *net = dev_net(net_dev);
        struct net_device *parent_dev;
        struct net *parent_net;
+       int iflink;
        bool ret;
 
        /* check if this is a batman-adv mesh interface */
        if (batadv_softif_is_valid(net_dev))
                return true;
 
-       /* no more parents..stop recursion */
-       if (dev_get_iflink(net_dev) == 0 ||
-           dev_get_iflink(net_dev) == net_dev->ifindex)
+       iflink = dev_get_iflink(net_dev);
+       if (iflink == 0)
                return false;
 
        parent_net = batadv_getlink_net(net_dev, net);
 
+       /* iflink to itself, most likely physical device */
+       if (net == parent_net && iflink == net_dev->ifindex)
+               return false;
+
        /* recurse over the parent device */
-       parent_dev = __dev_get_by_index((struct net *)parent_net,
-                                       dev_get_iflink(net_dev));
+       parent_dev = __dev_get_by_index((struct net *)parent_net, iflink);
        /* if we got a NULL parent_dev there is something broken.. */
        if (!parent_dev) {
                pr_err("Cannot find parent device\n");
@@ -214,14 +217,15 @@ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev)
        struct net_device *real_netdev = NULL;
        struct net *real_net;
        struct net *net;
-       int ifindex;
+       int iflink;
 
        ASSERT_RTNL();
 
        if (!netdev)
                return NULL;
 
-       if (netdev->ifindex == dev_get_iflink(netdev)) {
+       iflink = dev_get_iflink(netdev);
+       if (iflink == 0) {
                dev_hold(netdev);
                return netdev;
        }
@@ -231,9 +235,16 @@ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev)
                goto out;
 
        net = dev_net(hard_iface->soft_iface);
-       ifindex = dev_get_iflink(netdev);
        real_net = batadv_getlink_net(netdev, net);
-       real_netdev = dev_get_by_index(real_net, ifindex);
+
+       /* iflink to itself, most likely physical device */
+       if (net == real_net && netdev->ifindex == iflink) {
+               real_netdev = netdev;
+               dev_hold(real_netdev);
+               goto out;
+       }
+
+       real_netdev = dev_get_by_index(real_net, iflink);
 
 out:
        batadv_hardif_put(hard_iface);
index 2b7bd36..2882bc7 100644 (file)
@@ -2738,6 +2738,7 @@ void hci_release_dev(struct hci_dev *hdev)
        hci_dev_unlock(hdev);
 
        ida_simple_remove(&hci_index_ida, hdev->id);
+       kfree_skb(hdev->sent_cmd);
        kfree(hdev);
 }
 EXPORT_SYMBOL(hci_release_dev);
index 0feb68f..9ba2a1a 100644 (file)
@@ -1841,6 +1841,7 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
        struct bdaddr_list *b, *t;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
+       u8 filter_policy;
        int err;
 
        /* Pause advertising if resolving list can be used as controllers are
@@ -1927,6 +1928,8 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
                err = -EINVAL;
 
 done:
+       filter_policy = err ? 0x00 : 0x01;
+
        /* Enable address resolution when LL Privacy is enabled. */
        err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
        if (err)
@@ -1937,7 +1940,7 @@ done:
                hci_resume_advertising_sync(hdev);
 
        /* Select filter policy to use accept list */
-       return err ? 0x00 : 0x01;
+       return filter_policy;
 }
 
 /* Returns true if an le connection is in the scanning state */
@@ -3262,10 +3265,10 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                events[0] |= 0x40;      /* LE Data Length Change */
 
-       /* If the controller supports LL Privacy feature, enable
-        * the corresponding event.
+       /* If the controller supports LL Privacy feature or LE Extended Adv,
+        * enable the corresponding event.
         */
-       if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
+       if (use_enhanced_conn_complete(hdev))
                events[1] |= 0x02;      /* LE Enhanced Connection Complete */
 
        /* If the controller supports Extended Scanner Filter
@@ -4106,9 +4109,9 @@ int hci_dev_close_sync(struct hci_dev *hdev)
        hci_inquiry_cache_flush(hdev);
        hci_pend_le_actions_clear(hdev);
        hci_conn_hash_flush(hdev);
-       hci_dev_unlock(hdev);
-
+       /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
        smp_unregister(hdev);
+       hci_dev_unlock(hdev);
 
        hci_sock_dev_event(hdev, HCI_DEV_DOWN);
 
@@ -5185,7 +5188,7 @@ int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
        return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
                                        plen, data,
                                        HCI_EV_LE_ENHANCED_CONN_COMPLETE,
-                                       HCI_CMD_TIMEOUT, NULL);
+                                       conn->conn_timeout, NULL);
 }
 
 int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
@@ -5270,9 +5273,18 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
        cp.min_ce_len = cpu_to_le16(0x0000);
        cp.max_ce_len = cpu_to_le16(0x0000);
 
+       /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
+        *
+        * If this event is unmasked and the HCI_LE_Connection_Complete event
+        * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
+        * sent when a new connection has been created.
+        */
        err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
-                                      sizeof(cp), &cp, HCI_EV_LE_CONN_COMPLETE,
-                                      HCI_CMD_TIMEOUT, NULL);
+                                      sizeof(cp), &cp,
+                                      use_enhanced_conn_complete(hdev) ?
+                                      HCI_EV_LE_ENHANCED_CONN_COMPLETE :
+                                      HCI_EV_LE_CONN_COMPLETE,
+                                      conn->conn_timeout, NULL);
 
 done:
        /* Re-enable advertising after the connection attempt is finished. */
index 37087cf..533cf60 100644 (file)
@@ -1218,7 +1218,13 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)
 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
 {
        struct mgmt_pending_cmd *cmd = data;
-       struct mgmt_mode *cp = cmd->param;
+       struct mgmt_mode *cp;
+
+       /* Make sure cmd still outstanding. */
+       if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
+               return;
+
+       cp = cmd->param;
 
        bt_dev_dbg(hdev, "err %d", err);
 
@@ -1242,7 +1248,7 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
                                mgmt_status(err));
        }
 
-       mgmt_pending_free(cmd);
+       mgmt_pending_remove(cmd);
 }
 
 static int set_powered_sync(struct hci_dev *hdev, void *data)
@@ -1281,7 +1287,7 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
                goto failed;
        }
 
-       cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
@@ -1290,6 +1296,9 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
        err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
                                 mgmt_set_powered_complete);
 
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
 failed:
        hci_dev_unlock(hdev);
        return err;
@@ -1383,6 +1392,10 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
 
        bt_dev_dbg(hdev, "err %d", err);
 
+       /* Make sure cmd still outstanding. */
+       if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
+               return;
+
        hci_dev_lock(hdev);
 
        if (err) {
@@ -1402,7 +1415,7 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
        new_settings(hdev, cmd->sk);
 
 done:
-       mgmt_pending_free(cmd);
+       mgmt_pending_remove(cmd);
        hci_dev_unlock(hdev);
 }
 
@@ -1511,7 +1524,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
                goto failed;
        }
 
-       cmd = mgmt_pending_new(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
@@ -1538,6 +1551,9 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
        err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
                                 mgmt_set_discoverable_complete);
 
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
 failed:
        hci_dev_unlock(hdev);
        return err;
@@ -1550,6 +1566,10 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
 
        bt_dev_dbg(hdev, "err %d", err);
 
+       /* Make sure cmd still outstanding. */
+       if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
+               return;
+
        hci_dev_lock(hdev);
 
        if (err) {
@@ -1562,7 +1582,9 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
        new_settings(hdev, cmd->sk);
 
 done:
-       mgmt_pending_free(cmd);
+       if (cmd)
+               mgmt_pending_remove(cmd);
+
        hci_dev_unlock(hdev);
 }
 
@@ -1634,7 +1656,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
                goto failed;
        }
 
-       cmd = mgmt_pending_new(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
@@ -1654,6 +1676,9 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
        err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
                                 mgmt_set_connectable_complete);
 
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
 failed:
        hci_dev_unlock(hdev);
        return err;
@@ -1774,6 +1799,10 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
        u8 enable = cp->val;
        bool changed;
 
+       /* Make sure cmd still outstanding. */
+       if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
+               return;
+
        if (err) {
                u8 mgmt_err = mgmt_status(err);
 
@@ -3321,6 +3350,9 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err)
 
        bt_dev_dbg(hdev, "err %d", err);
 
+       if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
+               return;
+
        if (status) {
                mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
                                status);
@@ -3493,6 +3525,9 @@ static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
        struct sk_buff *skb = cmd->skb;
        u8 status = mgmt_status(err);
 
+       if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
+               return;
+
        if (!status) {
                if (!skb)
                        status = MGMT_STATUS_FAILED;
@@ -3759,13 +3794,6 @@ static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
 
        hci_dev_lock(hdev);
 
-       if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
-               err = mgmt_cmd_status(sk, hdev->id,
-                                     MGMT_OP_SET_WIDEBAND_SPEECH,
-                                     MGMT_STATUS_BUSY);
-               goto unlock;
-       }
-
        if (hdev_is_powered(hdev) &&
            !!cp->val != hci_dev_test_flag(hdev,
                                           HCI_WIDEBAND_SPEECH_ENABLED)) {
@@ -5036,12 +5064,6 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
                goto unlock;
        }
 
-       if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
-               err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                     MGMT_STATUS_BUSY);
-               goto unlock;
-       }
-
        cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
        if (!cmd)
                err = -ENOMEM;
@@ -5261,11 +5283,16 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
 {
        struct mgmt_pending_cmd *cmd = data;
 
+       if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
+           cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
+           cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
+               return;
+
        bt_dev_dbg(hdev, "err %d", err);
 
        mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
                          cmd->param, 1);
-       mgmt_pending_free(cmd);
+       mgmt_pending_remove(cmd);
 
        hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
                                DISCOVERY_FINDING);
@@ -5327,7 +5354,7 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
        else
                hdev->discovery.limited = false;
 
-       cmd = mgmt_pending_new(sk, op, hdev, data, len);
+       cmd = mgmt_pending_add(sk, op, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
@@ -5336,7 +5363,7 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
        err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
                                 start_discovery_complete);
        if (err < 0) {
-               mgmt_pending_free(cmd);
+               mgmt_pending_remove(cmd);
                goto failed;
        }
 
@@ -5430,7 +5457,7 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
                goto failed;
        }
 
-       cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY,
+       cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
                               hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
@@ -5463,7 +5490,7 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
        err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
                                 start_discovery_complete);
        if (err < 0) {
-               mgmt_pending_free(cmd);
+               mgmt_pending_remove(cmd);
                goto failed;
        }
 
@@ -5495,11 +5522,14 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
 {
        struct mgmt_pending_cmd *cmd = data;
 
+       if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
+               return;
+
        bt_dev_dbg(hdev, "err %d", err);
 
        mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
                          cmd->param, 1);
-       mgmt_pending_free(cmd);
+       mgmt_pending_remove(cmd);
 
        if (!err)
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
@@ -5535,7 +5565,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
                goto unlock;
        }
 
-       cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
@@ -5544,7 +5574,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
        err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
                                 stop_discovery_complete);
        if (err < 0) {
-               mgmt_pending_free(cmd);
+               mgmt_pending_remove(cmd);
                goto unlock;
        }
 
@@ -7474,6 +7504,9 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
        u8 status = mgmt_status(err);
        u16 eir_len;
 
+       if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
+               return;
+
        if (!status) {
                if (!skb)
                        status = MGMT_STATUS_FAILED;
@@ -7969,11 +8002,7 @@ static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
 
 static bool adv_busy(struct hci_dev *hdev)
 {
-       return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
-               pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
-               pending_find(MGMT_OP_SET_LE, hdev) ||
-               pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
-               pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
+       return pending_find(MGMT_OP_SET_LE, hdev);
 }
 
 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
@@ -8563,9 +8592,7 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
                goto unlock;
        }
 
-       if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
-           pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
-           pending_find(MGMT_OP_SET_LE, hdev)) {
+       if (pending_find(MGMT_OP_SET_LE, hdev)) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
                                      MGMT_STATUS_BUSY);
                goto unlock;
index edee60b..37eef2c 100644 (file)
@@ -77,11 +77,12 @@ int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
 {
        struct hci_dev *hdev;
        struct mgmt_hdr *hdr;
-       int len = skb->len;
+       int len;
 
        if (!skb)
                return -EINVAL;
 
+       len = skb->len;
        hdev = bt_cb(skb)->mgmt.hdev;
 
        /* Time stamp */
index de24098..db4f264 100644 (file)
@@ -82,6 +82,9 @@ static void br_multicast_find_del_pg(struct net_bridge *br,
                                     struct net_bridge_port_group *pg);
 static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
 
+static int br_mc_disabled_update(struct net_device *dev, bool value,
+                                struct netlink_ext_ack *extack);
+
 static struct net_bridge_port_group *
 br_sg_port_find(struct net_bridge *br,
                struct net_bridge_port_group_sg_key *sg_p)
@@ -1156,6 +1159,7 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
                return mp;
 
        if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
+               br_mc_disabled_update(br->dev, false, NULL);
                br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
                return ERR_PTR(-E2BIG);
        }
index 02cbcb2..d2a430b 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/spinlock.h>
 #include <linux/hrtimer.h>
 #include <linux/wait.h>
 #include <linux/uio.h>
@@ -145,6 +146,7 @@ struct isotp_sock {
        struct tpcon rx, tx;
        struct list_head notifier;
        wait_queue_head_t wait;
+       spinlock_t rx_lock; /* protect single thread state machine */
 };
 
 static LIST_HEAD(isotp_notifier_list);
@@ -615,11 +617,17 @@ static void isotp_rcv(struct sk_buff *skb, void *data)
 
        n_pci_type = cf->data[ae] & 0xF0;
 
+       /* Make sure the state changes and data structures stay consistent at
+        * CAN frame reception time. This locking is not needed in real world
+        * use cases but the inconsistency can be triggered with syzkaller.
+        */
+       spin_lock(&so->rx_lock);
+
        if (so->opt.flags & CAN_ISOTP_HALF_DUPLEX) {
                /* check rx/tx path half duplex expectations */
                if ((so->tx.state != ISOTP_IDLE && n_pci_type != N_PCI_FC) ||
                    (so->rx.state != ISOTP_IDLE && n_pci_type == N_PCI_FC))
-                       return;
+                       goto out_unlock;
        }
 
        switch (n_pci_type) {
@@ -668,6 +676,9 @@ static void isotp_rcv(struct sk_buff *skb, void *data)
                isotp_rcv_cf(sk, cf, ae, skb);
                break;
        }
+
+out_unlock:
+       spin_unlock(&so->rx_lock);
 }
 
 static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so,
@@ -876,7 +887,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 
        if (!size || size > MAX_MSG_LENGTH) {
                err = -EINVAL;
-               goto err_out;
+               goto err_out_drop;
        }
 
        /* take care of a potential SF_DL ESC offset for TX_DL > 8 */
@@ -886,24 +897,24 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
            (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
                err = -EINVAL;
-               goto err_out;
+               goto err_out_drop;
        }
 
        err = memcpy_from_msg(so->tx.buf, msg, size);
        if (err < 0)
-               goto err_out;
+               goto err_out_drop;
 
        dev = dev_get_by_index(sock_net(sk), so->ifindex);
        if (!dev) {
                err = -ENXIO;
-               goto err_out;
+               goto err_out_drop;
        }
 
        skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb) {
                dev_put(dev);
-               goto err_out;
+               goto err_out_drop;
        }
 
        can_skb_reserve(skb);
@@ -965,7 +976,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (err) {
                pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
                               __func__, ERR_PTR(err));
-               goto err_out;
+               goto err_out_drop;
        }
 
        if (wait_tx_done) {
@@ -978,6 +989,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 
        return size;
 
+err_out_drop:
+       /* drop this PDU and unlock a potential wait queue */
+       old_state = ISOTP_IDLE;
 err_out:
        so->tx.state = old_state;
        if (so->tx.state == ISOTP_IDLE)
@@ -1444,6 +1458,7 @@ static int isotp_init(struct sock *sk)
        so->txtimer.function = isotp_tx_timer_handler;
 
        init_waitqueue_head(&so->wait);
+       spin_lock_init(&so->rx_lock);
 
        spin_lock(&isotp_notifier_lock);
        list_add_tail(&so->notifier, &isotp_notifier_list);
index a271688..307ee11 100644 (file)
@@ -2006,7 +2006,7 @@ struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
                /* set the end-packet for broadcast */
                session->pkt.last = session->pkt.total;
 
-       skcb->tskey = session->sk->sk_tskey++;
+       skcb->tskey = atomic_inc_return(&session->sk->sk_tskey) - 1;
        session->tskey = skcb->tskey;
 
        return session;
index 7b288a1..d5dc6be 100644 (file)
@@ -283,13 +283,17 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
 
        rcu_read_lock();
        list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
+               struct net_device *dev;
+
                /*
                 * only add a note to our monitor buffer if:
                 * 1) this is the dev we received on
                 * 2) its after the last_rx delta
                 * 3) our rx_dropped count has gone up
                 */
-               if ((new_stat->dev == napi->dev)  &&
+               /* Paired with WRITE_ONCE() in dropmon_net_event() */
+               dev = READ_ONCE(new_stat->dev);
+               if ((dev == napi->dev)  &&
                    (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
                    (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
                        trace_drop_common(NULL, NULL);
@@ -1576,7 +1580,10 @@ static int dropmon_net_event(struct notifier_block *ev_block,
                mutex_lock(&net_dm_mutex);
                list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
                        if (new_stat->dev == dev) {
-                               new_stat->dev = NULL;
+
+                               /* Paired with READ_ONCE() in trace_napi_poll_hit() */
+                               WRITE_ONCE(new_stat->dev, NULL);
+
                                if (trace_state == TRACE_OFF) {
                                        list_del_rcu(&new_stat->list);
                                        kfree_rcu(new_stat, rcu);
index 4603b7c..9eb7858 100644 (file)
@@ -2710,6 +2710,9 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
        if (unlikely(flags))
                return -EINVAL;
 
+       if (unlikely(len == 0))
+               return 0;
+
        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
index 53ea262..fbddf96 100644 (file)
@@ -213,7 +213,7 @@ static ssize_t speed_show(struct device *dev,
        if (!rtnl_trylock())
                return restart_syscall();
 
-       if (netif_running(netdev)) {
+       if (netif_running(netdev) && netif_device_present(netdev)) {
                struct ethtool_link_ksettings cmd;
 
                if (!__ethtool_get_link_ksettings(netdev, &cmd))
index 710da8a..2fb8eb6 100644 (file)
@@ -1699,6 +1699,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
 {
        struct ifinfomsg *ifm;
        struct nlmsghdr *nlh;
+       struct Qdisc *qdisc;
 
        ASSERT_RTNL();
        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
@@ -1716,6 +1717,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
        if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
                goto nla_put_failure;
 
+       qdisc = rtnl_dereference(dev->qdisc);
        if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
            nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
            nla_put_u8(skb, IFLA_OPERSTATE,
@@ -1735,8 +1737,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
 #endif
            put_master_ifindex(skb, dev) ||
            nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
-           (dev->qdisc &&
-            nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
+           (qdisc &&
+            nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
            nla_put_ifalias(skb, dev) ||
            nla_put_u32(skb, IFLA_CARRIER_CHANGES,
                        atomic_read(&dev->carrier_up_count) +
index 0118f0a..ea51e23 100644 (file)
@@ -681,7 +681,7 @@ exit:
         * while trying to recycle fragments on __skb_frag_unref() we need
         * to make one SKB responsible for triggering the recycle path.
         * So disable the recycling bit if an SKB is cloned and we have
-        * additional references to to the fragmented part of the SKB.
+        * additional references to the fragmented part of the SKB.
         * Eventually the last SKB will have the recycling bit set and it's
         * dataref set to 0, which will trigger the recycling
         */
@@ -2276,7 +2276,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
                /* Free pulled out fragments. */
                while ((list = skb_shinfo(skb)->frag_list) != insp) {
                        skb_shinfo(skb)->frag_list = list->next;
-                       kfree_skb(list);
+                       consume_skb(list);
                }
                /* And insert new clone at head. */
                if (clone) {
@@ -3876,6 +3876,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
                list_skb = list_skb->next;
 
                err = 0;
+               delta_truesize += nskb->truesize;
                if (skb_shared(nskb)) {
                        tmp = skb_clone(nskb, GFP_ATOMIC);
                        if (tmp) {
@@ -3900,7 +3901,6 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
                tail = nskb;
 
                delta_len += nskb->len;
-               delta_truesize += nskb->truesize;
 
                skb_push(nskb, -skb_network_offset(nskb) + offset);
 
@@ -4730,7 +4730,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
        if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
                serr->ee.ee_data = skb_shinfo(skb)->tskey;
                if (sk_is_tcp(sk))
-                       serr->ee.ee_data -= sk->sk_tskey;
+                       serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
        }
 
        err = sock_queue_err_skb(sk, skb);
@@ -6105,7 +6105,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb,
        /* Free pulled out fragments. */
        while ((list = shinfo->frag_list) != insp) {
                shinfo->frag_list = list->next;
-               kfree_skb(list);
+               consume_skb(list);
        }
        /* And insert new clone at head. */
        if (clone) {
index 8eb671c..929a2b0 100644 (file)
@@ -1153,7 +1153,7 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
-       int len = skb->len;
+       int len = orig_len;
 
        /* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
        skb = skb_clone(skb, GFP_ATOMIC);
index 4ff806d..6eb1748 100644 (file)
@@ -879,9 +879,9 @@ int sock_set_timestamping(struct sock *sk, int optname,
                        if ((1 << sk->sk_state) &
                            (TCPF_CLOSE | TCPF_LISTEN))
                                return -EINVAL;
-                       sk->sk_tskey = tcp_sk(sk)->snd_una;
+                       atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
                } else {
-                       sk->sk_tskey = 0;
+                       atomic_set(&sk->sk_tskey, 0);
                }
        }
 
index b441ab3..dc4fb69 100644 (file)
@@ -2073,8 +2073,52 @@ u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
 }
 EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
 
+static void dcbnl_flush_dev(struct net_device *dev)
+{
+       struct dcb_app_type *itr, *tmp;
+
+       spin_lock_bh(&dcb_lock);
+
+       list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) {
+               if (itr->ifindex == dev->ifindex) {
+                       list_del(&itr->list);
+                       kfree(itr);
+               }
+       }
+
+       spin_unlock_bh(&dcb_lock);
+}
+
+static int dcbnl_netdevice_event(struct notifier_block *nb,
+                                unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       switch (event) {
+       case NETDEV_UNREGISTER:
+               if (!dev->dcbnl_ops)
+                       return NOTIFY_DONE;
+
+               dcbnl_flush_dev(dev);
+
+               return NOTIFY_OK;
+       default:
+               return NOTIFY_DONE;
+       }
+}
+
+static struct notifier_block dcbnl_nb __read_mostly = {
+       .notifier_call  = dcbnl_netdevice_event,
+};
+
 static int __init dcbnl_init(void)
 {
+       int err;
+
+       err = register_netdevice_notifier(&dcbnl_nb);
+       if (err)
+               return err;
+
        rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
 
index d9d0d22..c43f744 100644 (file)
@@ -349,6 +349,7 @@ void dsa_flush_workqueue(void)
 {
        flush_workqueue(dsa_owq);
 }
+EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
 
 int dsa_devlink_param_get(struct devlink *dl, u32 id,
                          struct devlink_param_gset_ctx *ctx)
index 3d21521..b4e6775 100644 (file)
@@ -1261,7 +1261,7 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
        info.tag_ops = tag_ops;
        err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
        if (err)
-               return err;
+               goto out_unwind_tagger;
 
        err = dsa_tree_bind_tag_proto(dst, tag_ops);
        if (err)
@@ -1718,7 +1718,6 @@ EXPORT_SYMBOL_GPL(dsa_unregister_switch);
 void dsa_switch_shutdown(struct dsa_switch *ds)
 {
        struct net_device *master, *slave_dev;
-       LIST_HEAD(unregister_list);
        struct dsa_port *dp;
 
        mutex_lock(&dsa2_mutex);
@@ -1729,25 +1728,13 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
                slave_dev = dp->slave;
 
                netdev_upper_dev_unlink(master, slave_dev);
-               /* Just unlinking ourselves as uppers of the master is not
-                * sufficient. When the master net device unregisters, that will
-                * also call dev_close, which we will catch as NETDEV_GOING_DOWN
-                * and trigger a dev_close on our own devices (dsa_slave_close).
-                * In turn, that will call dev_mc_unsync on the master's net
-                * device. If the master is also a DSA switch port, this will
-                * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
-                * its own master. Lockdep will complain about the fact that
-                * all cascaded masters have the same dsa_master_addr_list_lock_key,
-                * which it normally would not do if the cascaded masters would
-                * be in a proper upper/lower relationship, which we've just
-                * destroyed.
-                * To suppress the lockdep warnings, let's actually unregister
-                * the DSA slave interfaces too, to avoid the nonsensical
-                * multicast address list synchronization on shutdown.
-                */
-               unregister_netdevice_queue(slave_dev, &unregister_list);
        }
-       unregister_netdevice_many(&unregister_list);
+
+       /* Disconnect from further netdevice notifiers on the master,
+        * since netdev_uses_dsa() will now return false.
+        */
+       dsa_switch_for_each_cpu_port(dp, ds)
+               dp->master->dsa_ptr = NULL;
 
        rtnl_unlock();
        mutex_unlock(&dsa2_mutex);
index 760306f..23c79e9 100644 (file)
@@ -147,7 +147,6 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
 const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
 
 bool dsa_schedule_work(struct work_struct *work);
-void dsa_flush_workqueue(void);
 const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
 
 static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
index 2199104..880f910 100644 (file)
@@ -260,11 +260,16 @@ static void dsa_netdev_ops_set(struct net_device *dev,
        dev->dsa_ptr->netdev_ops = ops;
 }
 
+/* Keep the master always promiscuous if the tagging protocol requires that
+ * (garbles MAC DA) or if it doesn't support unicast filtering, case in which
+ * it would revert to promiscuous mode as soon as we call dev_uc_add() on it
+ * anyway.
+ */
 static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
 {
        const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;
 
-       if (!ops->promisc_on_master)
+       if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
                return;
 
        ASSERT_RTNL();
index bd78192..1a40c52 100644 (file)
@@ -395,10 +395,17 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
                .tree_index = dp->ds->dst->index,
                .sw_index = dp->ds->index,
                .port = dp->index,
-               .bridge = *dp->bridge,
        };
        int err;
 
+       /* If the port could not be offloaded to begin with, then
+        * there is nothing to do.
+        */
+       if (!dp->bridge)
+               return;
+
+       info.bridge = *dp->bridge;
+
        /* Here the port is already unbridged. Reflect the current configuration
         * so that drivers can program their chips accordingly.
         */
@@ -781,9 +788,15 @@ int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
        struct dsa_port *cpu_dp = dp->cpu_dp;
        int err;
 
-       err = dev_uc_add(cpu_dp->master, addr);
-       if (err)
-               return err;
+       /* Avoid a call to __dev_set_promiscuity() on the master, which
+        * requires rtnl_lock(), since we can't guarantee that is held here,
+        * and we can't take it either.
+        */
+       if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
+               err = dev_uc_add(cpu_dp->master, addr);
+               if (err)
+                       return err;
+       }
 
        return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
 }
@@ -800,9 +813,11 @@ int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
        struct dsa_port *cpu_dp = dp->cpu_dp;
        int err;
 
-       err = dev_uc_del(cpu_dp->master, addr);
-       if (err)
-               return err;
+       if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
+               err = dev_uc_del(cpu_dp->master, addr);
+               if (err)
+                       return err;
+       }
 
        return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
 }
index cb54818..98d7d71 100644 (file)
@@ -77,7 +77,6 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
 {
-       __be16 *lan9303_tag;
        u16 lan9303_tag1;
        unsigned int source_port;
 
@@ -87,14 +86,15 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
                return NULL;
        }
 
-       lan9303_tag = dsa_etype_header_pos_rx(skb);
-
-       if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
-               dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n");
-               return NULL;
+       if (skb_vlan_tag_present(skb)) {
+               lan9303_tag1 = skb_vlan_tag_get(skb);
+               __vlan_hwaccel_clear_tag(skb);
+       } else {
+               skb_push_rcsum(skb, ETH_HLEN);
+               __skb_vlan_pop(skb, &lan9303_tag1);
+               skb_pull_rcsum(skb, ETH_HLEN);
        }
 
-       lan9303_tag1 = ntohs(lan9303_tag[1]);
        source_port = lan9303_tag1 & 0x3;
 
        skb->dev = dsa_master_find_slave(dev, 0, source_port);
@@ -103,13 +103,6 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
                return NULL;
        }
 
-       /* remove the special VLAN tag between the MAC addresses
-        * and the current ethertype field.
-        */
-       skb_pull_rcsum(skb, 2 + 2);
-
-       dsa_strip_etype_header(skb, LAN9303_TAG_LEN);
-
        if (!(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU))
                dsa_default_offload_fwd_mark(skb);
 
index 9c465ba..72fde28 100644 (file)
@@ -1376,8 +1376,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
        }
 
        ops = rcu_dereference(inet_offloads[proto]);
-       if (likely(ops && ops->callbacks.gso_segment))
+       if (likely(ops && ops->callbacks.gso_segment)) {
                segs = ops->callbacks.gso_segment(skb, features);
+               if (!segs)
+                       skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
+       }
 
        if (IS_ERR_OR_NULL(segs))
                goto out;
index 851f542..e1b1d08 100644 (file)
@@ -671,7 +671,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;
 
-               padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
+               padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        esp.tfclen = padto - skb->len;
        }
index 4d61ddd..85117b4 100644 (file)
@@ -436,6 +436,9 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
                if (net->ipv4.fib_has_custom_local_routes ||
                    fib4_has_custom_rules(net))
                        goto full_check;
+               /* Within the same container, it is regarded as a martian source,
+                * and the same host but different containers are not.
+                */
                if (inet_lookup_ifaddr_rcu(net, src))
                        return -EINVAL;
 
index e184bcb..78e40ea 100644 (file)
@@ -16,10 +16,9 @@ struct fib_alias {
        u8                      fa_slen;
        u32                     tb_id;
        s16                     fa_default;
-       u8                      offload:1,
-                               trap:1,
-                               offload_failed:1,
-                               unused:5;
+       u8                      offload;
+       u8                      trap;
+       u8                      offload_failed;
        struct rcu_head         rcu;
 };
 
index b458986..2dd375f 100644 (file)
@@ -525,9 +525,9 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
        fri.dst_len = dst_len;
        fri.tos = fa->fa_tos;
        fri.type = fa->fa_type;
-       fri.offload = fa->offload;
-       fri.trap = fa->trap;
-       fri.offload_failed = fa->offload_failed;
+       fri.offload = READ_ONCE(fa->offload);
+       fri.trap = READ_ONCE(fa->trap);
+       fri.offload_failed = READ_ONCE(fa->offload_failed);
        err = fib_dump_info(skb, info->portid, seq, event, &fri, nlm_flags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
index 8060524..f7f74d5 100644 (file)
@@ -1047,19 +1047,23 @@ void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
        if (!fa_match)
                goto out;
 
-       if (fa_match->offload == fri->offload && fa_match->trap == fri->trap &&
-           fa_match->offload_failed == fri->offload_failed)
+       /* These are paired with the WRITE_ONCE() happening in this function.
+        * The reason is that we are only protected by RCU at this point.
+        */
+       if (READ_ONCE(fa_match->offload) == fri->offload &&
+           READ_ONCE(fa_match->trap) == fri->trap &&
+           READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
                goto out;
 
-       fa_match->offload = fri->offload;
-       fa_match->trap = fri->trap;
+       WRITE_ONCE(fa_match->offload, fri->offload);
+       WRITE_ONCE(fa_match->trap, fri->trap);
 
        /* 2 means send notifications only if offload_failed was changed. */
        if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
-           fa_match->offload_failed == fri->offload_failed)
+           READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
                goto out;
 
-       fa_match->offload_failed = fri->offload_failed;
+       WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
 
        if (!net->ipv4.sysctl_fib_notify_on_flag_change)
                goto out;
@@ -2297,9 +2301,9 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
                                fri.dst_len = KEYLENGTH - fa->fa_slen;
                                fri.tos = fa->fa_tos;
                                fri.type = fa->fa_type;
-                               fri.offload = fa->offload;
-                               fri.trap = fa->trap;
-                               fri.offload_failed = fa->offload_failed;
+                               fri.offload = READ_ONCE(fa->offload);
+                               fri.trap = READ_ONCE(fa->trap);
+                               fri.offload_failed = READ_ONCE(fa->offload_failed);
                                err = fib_dump_info(skb,
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
index 139cec2..7911916 100644 (file)
@@ -991,7 +991,7 @@ static int __ip_append_data(struct sock *sk,
 
        if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
            sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
-               tskey = sk->sk_tskey++;
+               tskey = atomic_inc_return(&sk->sk_tskey) - 1;
 
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 
index 0727461..29bbe2b 100644 (file)
@@ -256,7 +256,9 @@ static int __net_init ipmr_rules_init(struct net *net)
        return 0;
 
 err2:
+       rtnl_lock();
        ipmr_free_table(mrt);
+       rtnl_unlock();
 err1:
        fib_rules_unregister(ops);
        return err;
index bcf7bc7..3ee9475 100644 (file)
@@ -172,16 +172,22 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
        struct sock *sk = NULL;
        struct inet_sock *isk;
        struct hlist_nulls_node *hnode;
-       int dif = skb->dev->ifindex;
+       int dif, sdif;
 
        if (skb->protocol == htons(ETH_P_IP)) {
+               dif = inet_iif(skb);
+               sdif = inet_sdif(skb);
                pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
                         (int)ident, &ip_hdr(skb)->daddr, dif);
 #if IS_ENABLED(CONFIG_IPV6)
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               dif = inet6_iif(skb);
+               sdif = inet6_sdif(skb);
                pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
                         (int)ident, &ipv6_hdr(skb)->daddr, dif);
 #endif
+       } else {
+               return NULL;
        }
 
        read_lock_bh(&ping_table.lock);
@@ -221,7 +227,7 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
                }
 
                if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
-                   sk->sk_bound_dev_if != inet_sdif(skb))
+                   sk->sk_bound_dev_if != sdif)
                        continue;
 
                sock_hold(sk);
index ff6f91c..f33ad1f 100644 (file)
@@ -3395,8 +3395,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                                    fa->fa_tos == fri.tos &&
                                    fa->fa_info == res.fi &&
                                    fa->fa_type == fri.type) {
-                                       fri.offload = fa->offload;
-                                       fri.trap = fa->trap;
+                                       fri.offload = READ_ONCE(fa->offload);
+                                       fri.trap = READ_ONCE(fa->trap);
                                        break;
                                }
                        }
index bdf108f..28ff2a8 100644 (file)
@@ -937,6 +937,22 @@ void tcp_remove_empty_skb(struct sock *sk)
        }
 }
 
+/* skb changing from pure zc to mixed, must charge zc */
+static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
+{
+       if (unlikely(skb_zcopy_pure(skb))) {
+               u32 extra = skb->truesize -
+                           SKB_TRUESIZE(skb_end_offset(skb));
+
+               if (!sk_wmem_schedule(sk, extra))
+                       return -ENOMEM;
+
+               sk_mem_charge(sk, extra);
+               skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
+       }
+       return 0;
+}
+
 static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
                                      struct page *page, int offset, size_t *size)
 {
@@ -972,7 +988,7 @@ new_segment:
                tcp_mark_push(tp, skb);
                goto new_segment;
        }
-       if (!sk_wmem_schedule(sk, copy))
+       if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
                return NULL;
 
        if (can_coalesce) {
@@ -1320,19 +1336,8 @@ new_segment:
 
                        copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
-                       /* skb changing from pure zc to mixed, must charge zc */
-                       if (unlikely(skb_zcopy_pure(skb))) {
-                               u32 extra = skb->truesize -
-                                           SKB_TRUESIZE(skb_end_offset(skb));
-
-                               if (!sk_wmem_schedule(sk, extra))
-                                       goto wait_for_space;
-
-                               sk_mem_charge(sk, extra);
-                               skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
-                       }
-
-                       if (!sk_wmem_schedule(sk, copy))
+                       if (tcp_downgrade_zcopy_pure(sk, skb) ||
+                           !sk_wmem_schedule(sk, copy))
                                goto wait_for_space;
 
                        err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
@@ -1679,11 +1684,13 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                                if (!copied)
                                        copied = used;
                                break;
-                       } else if (used <= len) {
-                               seq += used;
-                               copied += used;
-                               offset += used;
                        }
+                       if (WARN_ON_ONCE(used > len))
+                               used = len;
+                       seq += used;
+                       copied += used;
+                       offset += used;
+
                        /* If recv_actor drops the lock (e.g. TCP splice
                         * receive) the skb pointer might be invalid when
                         * getting here: tcp_collapse might have deleted it
index b910035..bc3a043 100644 (file)
@@ -846,7 +846,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
                list_for_each_entry(node, &info->shared->devices, list)
                        if (node->dev == dev)
                                break;
-               if (node->dev != dev)
+               if (list_entry_is_head(node, &info->shared->devices, list))
                        return;
 
                list_del(&node->list);
index f927c19..f908e2f 100644 (file)
@@ -1839,8 +1839,8 @@ out:
 }
 EXPORT_SYMBOL(ipv6_dev_get_saddr);
 
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
-                     u32 banned_flags)
+static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
+                             u32 banned_flags)
 {
        struct inet6_ifaddr *ifp;
        int err = -EADDRNOTAVAIL;
@@ -3732,6 +3732,7 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
        struct inet6_dev *idev;
        struct inet6_ifaddr *ifa, *tmp;
        bool keep_addr = false;
+       bool was_ready;
        int state, i;
 
        ASSERT_RTNL();
@@ -3797,7 +3798,10 @@ restart:
 
        addrconf_del_rs_timer(idev);
 
-       /* Step 2: clear flags for stateless addrconf */
+       /* Step 2: clear flags for stateless addrconf, repeated down
+        *         detection
+        */
+       was_ready = idev->if_flags & IF_READY;
        if (!unregister)
                idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
 
@@ -3871,7 +3875,7 @@ restart:
        if (unregister) {
                ipv6_ac_destroy_dev(idev);
                ipv6_mc_destroy_dev(idev);
-       } else {
+       } else if (was_ready) {
                ipv6_mc_down(idev);
        }
 
@@ -4998,6 +5002,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
            nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
                goto error;
 
+       spin_lock_bh(&ifa->lock);
        if (!((ifa->flags&IFA_F_PERMANENT) &&
              (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
                preferred = ifa->prefered_lft;
@@ -5019,6 +5024,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
                preferred = INFINITY_LIFE_TIME;
                valid = INFINITY_LIFE_TIME;
        }
+       spin_unlock_bh(&ifa->lock);
 
        if (!ipv6_addr_any(&ifa->peer_addr)) {
                if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
index 8bb2c40..7591160 100644 (file)
@@ -707,7 +707,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;
 
-               padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
+               padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        esp.tfclen = padto - skb->len;
        }
index aa673a6..ceb85c6 100644 (file)
@@ -450,8 +450,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                err = -EINVAL;
                goto done;
        }
-       if (fl_shared_exclusive(fl) || fl->opt)
+       if (fl_shared_exclusive(fl) || fl->opt) {
+               WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1);
                static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
+       }
        return fl;
 
 done:
index b29e9ba..5f577e2 100644 (file)
@@ -114,6 +114,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        if (likely(ops && ops->callbacks.gso_segment)) {
                skb_reset_transport_header(skb);
                segs = ops->callbacks.gso_segment(skb, features);
+               if (!segs)
+                       skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
        }
 
        if (IS_ERR_OR_NULL(segs))
index 2995f8d..4788f6b 100644 (file)
@@ -1408,8 +1408,6 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
                if (np->frag_size)
                        mtu = np->frag_size;
        }
-       if (mtu < IPV6_MIN_MTU)
-               return -EINVAL;
        cork->base.fragsize = mtu;
        cork->base.gso_size = ipc6->gso_size;
        cork->base.tx_flags = 0;
@@ -1465,14 +1463,12 @@ static int __ip6_append_data(struct sock *sk,
 
        if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
            sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
-               tskey = sk->sk_tskey++;
+               tskey = atomic_inc_return(&sk->sk_tskey) - 1;
 
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 
        fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
                        (opt ? opt->opt_nflen : 0);
-       maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
-                    sizeof(struct frag_hdr);
 
        headersize = sizeof(struct ipv6hdr) +
                     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
@@ -1480,6 +1476,13 @@ static int __ip6_append_data(struct sock *sk,
                      sizeof(struct frag_hdr) : 0) +
                     rt->rt6i_nfheader_len;
 
+       if (mtu < fragheaderlen ||
+           ((mtu - fragheaderlen) & ~7) + fragheaderlen < sizeof(struct frag_hdr))
+               goto emsgsize;
+
+       maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
+                    sizeof(struct frag_hdr);
+
        /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
         * the first fragment
         */
index 7cf73e6..8a2db92 100644 (file)
@@ -243,7 +243,9 @@ static int __net_init ip6mr_rules_init(struct net *net)
        return 0;
 
 err2:
+       rtnl_lock();
        ip6mr_free_table(mrt);
+       rtnl_unlock();
 err1:
        fib_rules_unregister(ops);
        return err;
index bed8155..909f937 100644 (file)
@@ -1371,27 +1371,23 @@ static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
 }
 
 /* called with rcu_read_lock() */
-int igmp6_event_query(struct sk_buff *skb)
+void igmp6_event_query(struct sk_buff *skb)
 {
        struct inet6_dev *idev = __in6_dev_get(skb->dev);
 
-       if (!idev)
-               return -EINVAL;
-
-       if (idev->dead) {
-               kfree_skb(skb);
-               return -ENODEV;
-       }
+       if (!idev || idev->dead)
+               goto out;
 
        spin_lock_bh(&idev->mc_query_lock);
        if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
                __skb_queue_tail(&idev->mc_query_queue, skb);
                if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
                        in6_dev_hold(idev);
+               skb = NULL;
        }
        spin_unlock_bh(&idev->mc_query_lock);
-
-       return 0;
+out:
+       kfree_skb(skb);
 }
 
 static void __mld_query_work(struct sk_buff *skb)
@@ -1542,27 +1538,23 @@ static void mld_query_work(struct work_struct *work)
 }
 
 /* called with rcu_read_lock() */
-int igmp6_event_report(struct sk_buff *skb)
+void igmp6_event_report(struct sk_buff *skb)
 {
        struct inet6_dev *idev = __in6_dev_get(skb->dev);
 
-       if (!idev)
-               return -EINVAL;
-
-       if (idev->dead) {
-               kfree_skb(skb);
-               return -ENODEV;
-       }
+       if (!idev || idev->dead)
+               goto out;
 
        spin_lock_bh(&idev->mc_report_lock);
        if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
                __skb_queue_tail(&idev->mc_report_queue, skb);
                if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
                        in6_dev_hold(idev);
+               skb = NULL;
        }
        spin_unlock_bh(&idev->mc_report_lock);
-
-       return 0;
+out:
+       kfree_skb(skb);
 }
 
 static void __mld_report_work(struct sk_buff *skb)
@@ -1759,7 +1751,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
        skb_reserve(skb, hlen);
        skb_tailroom_reserve(skb, mtu, tlen);
 
-       if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
+       if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
                /* <draft-ietf-magma-mld-source-05.txt>:
                 * use unspecified address as the source address
                 * when a valid link-local address is not available.
index f4884cd..ea1cf41 100644 (file)
@@ -5753,11 +5753,11 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
        }
 
        if (!dst) {
-               if (rt->offload)
+               if (READ_ONCE(rt->offload))
                        rtm->rtm_flags |= RTM_F_OFFLOAD;
-               if (rt->trap)
+               if (READ_ONCE(rt->trap))
                        rtm->rtm_flags |= RTM_F_TRAP;
-               if (rt->offload_failed)
+               if (READ_ONCE(rt->offload_failed))
                        rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
        }
 
@@ -6215,19 +6215,20 @@ void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
        struct sk_buff *skb;
        int err;
 
-       if (f6i->offload == offload && f6i->trap == trap &&
-           f6i->offload_failed == offload_failed)
+       if (READ_ONCE(f6i->offload) == offload &&
+           READ_ONCE(f6i->trap) == trap &&
+           READ_ONCE(f6i->offload_failed) == offload_failed)
                return;
 
-       f6i->offload = offload;
-       f6i->trap = trap;
+       WRITE_ONCE(f6i->offload, offload);
+       WRITE_ONCE(f6i->trap, trap);
 
        /* 2 means send notifications only if offload_failed was changed. */
        if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
-           f6i->offload_failed == offload_failed)
+           READ_ONCE(f6i->offload_failed) == offload_failed)
                return;
 
-       f6i->offload_failed = offload_failed;
+       WRITE_ONCE(f6i->offload_failed, offload_failed);
 
        if (!rcu_access_pointer(f6i->fib6_node))
                /* The route was removed from the tree, do not send
index de24a7d..9bf52a0 100644 (file)
@@ -2623,7 +2623,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
        }
 
        return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i,
-                           kma ? &k : NULL, net, NULL);
+                           kma ? &k : NULL, net, NULL, 0);
 
  out:
        return err;
index 74a878f..1deb3d8 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2021 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -626,6 +626,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
                return -EINVAL;
        }
 
+       if (test_sta_flag(sta, WLAN_STA_MFP) &&
+           !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
+               ht_dbg(sdata,
+                      "MFP STA not authorized - deny BA session request %pM tid %d\n",
+                      sta->sta.addr, tid);
+               return -EINVAL;
+       }
+
        /*
         * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
         * member of an IBSS, and has no other existing Block Ack agreement
index 330ea62..e87bcca 100644 (file)
@@ -376,7 +376,7 @@ struct ieee80211_mgd_auth_data {
 
        u8 key[WLAN_KEY_LEN_WEP104];
        u8 key_len, key_idx;
-       bool done;
+       bool done, waiting;
        bool peer_confirmed;
        bool timeout_started;
 
index 1eeabdf..744842c 100644 (file)
@@ -37,6 +37,7 @@
 #define IEEE80211_AUTH_TIMEOUT_SAE     (HZ * 2)
 #define IEEE80211_AUTH_MAX_TRIES       3
 #define IEEE80211_AUTH_WAIT_ASSOC      (HZ * 5)
+#define IEEE80211_AUTH_WAIT_SAE_RETRY  (HZ * 2)
 #define IEEE80211_ASSOC_TIMEOUT                (HZ / 5)
 #define IEEE80211_ASSOC_TIMEOUT_LONG   (HZ / 2)
 #define IEEE80211_ASSOC_TIMEOUT_SHORT  (HZ / 10)
@@ -666,7 +667,7 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
        ieee80211_ie_build_he_6ghz_cap(sdata, skb);
 }
 
-static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
+static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -686,6 +687,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
        enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
        const struct ieee80211_sband_iftype_data *iftd;
        struct ieee80211_prep_tx_info info = {};
+       int ret;
 
        /* we know it's writable, cast away the const */
        if (assoc_data->ie_len)
@@ -699,7 +701,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
        if (WARN_ON(!chanctx_conf)) {
                rcu_read_unlock();
-               return;
+               return -EINVAL;
        }
        chan = chanctx_conf->def.chan;
        rcu_read_unlock();
@@ -750,7 +752,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                        (iftd ? iftd->vendor_elems.len : 0),
                        GFP_KERNEL);
        if (!skb)
-               return;
+               return -ENOMEM;
 
        skb_reserve(skb, local->hw.extra_tx_headroom);
 
@@ -1031,15 +1033,22 @@ skip_rates:
                skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
        }
 
-       if (assoc_data->fils_kek_len &&
-           fils_encrypt_assoc_req(skb, assoc_data) < 0) {
-               dev_kfree_skb(skb);
-               return;
+       if (assoc_data->fils_kek_len) {
+               ret = fils_encrypt_assoc_req(skb, assoc_data);
+               if (ret < 0) {
+                       dev_kfree_skb(skb);
+                       return ret;
+               }
        }
 
        pos = skb_tail_pointer(skb);
        kfree(ifmgd->assoc_req_ies);
        ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC);
+       if (!ifmgd->assoc_req_ies) {
+               dev_kfree_skb(skb);
+               return -ENOMEM;
+       }
+
        ifmgd->assoc_req_ies_len = pos - ie_start;
 
        drv_mgd_prepare_tx(local, sdata, &info);
@@ -1049,6 +1058,8 @@ skip_rates:
                IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
                                                IEEE80211_TX_INTFL_MLME_CONN_TX;
        ieee80211_tx_skb(sdata, skb);
+
+       return 0;
 }
 
 void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -3001,8 +3012,15 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
                    (status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED ||
                     (auth_transaction == 1 &&
                      (status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT ||
-                      status_code == WLAN_STATUS_SAE_PK))))
+                      status_code == WLAN_STATUS_SAE_PK)))) {
+                       /* waiting for userspace now */
+                       ifmgd->auth_data->waiting = true;
+                       ifmgd->auth_data->timeout =
+                               jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY;
+                       ifmgd->auth_data->timeout_started = true;
+                       run_again(sdata, ifmgd->auth_data->timeout);
                        goto notify_driver;
+               }
 
                sdata_info(sdata, "%pM denied authentication (status %d)\n",
                           mgmt->sa, status_code);
@@ -4497,6 +4515,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
        struct ieee80211_local *local = sdata->local;
+       int ret;
 
        sdata_assert_lock(sdata);
 
@@ -4517,7 +4536,9 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
        sdata_info(sdata, "associate with %pM (try %d/%d)\n",
                   assoc_data->bss->bssid, assoc_data->tries,
                   IEEE80211_ASSOC_MAX_TRIES);
-       ieee80211_send_assoc(sdata);
+       ret = ieee80211_send_assoc(sdata);
+       if (ret)
+               return ret;
 
        if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
                assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
@@ -4590,10 +4611,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
 
        if (ifmgd->auth_data && ifmgd->auth_data->timeout_started &&
            time_after(jiffies, ifmgd->auth_data->timeout)) {
-               if (ifmgd->auth_data->done) {
+               if (ifmgd->auth_data->done || ifmgd->auth_data->waiting) {
                        /*
-                        * ok ... we waited for assoc but userspace didn't,
-                        * so let's just kill the auth data
+                        * ok ... we waited for assoc or continuation but
+                        * userspace didn't do it, so kill the auth data
                         */
                        ieee80211_destroy_auth_data(sdata, false);
                } else if (ieee80211_auth(sdata)) {
index 93680af..48d9553 100644 (file)
@@ -2607,7 +2607,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
                 * address, so that the authenticator (e.g. hostapd) will see
                 * the frame, but bridge won't forward it anywhere else. Note
                 * that due to earlier filtering, the only other address can
-                * be the PAE group address.
+                * be the PAE group address, unless the hardware allowed them
+                * through in 802.3 offloaded mode.
                 */
                if (unlikely(skb->protocol == sdata->control_port_protocol &&
                             !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
@@ -2922,13 +2923,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
            ether_addr_equal(sdata->vif.addr, hdr->addr3))
                return RX_CONTINUE;
 
-       ac = ieee80211_select_queue_80211(sdata, skb, hdr);
+       ac = ieee802_1d_to_ac[skb->priority];
        q = sdata->vif.hw_queue[ac];
        if (ieee80211_queue_stopped(&local->hw, q)) {
                IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
                return RX_DROP_MONITOR;
        }
-       skb_set_queue_mapping(skb, q);
+       skb_set_queue_mapping(skb, ac);
 
        if (!--mesh_hdr->ttl) {
                if (!is_multicast_ether_addr(hdr->addr1))
@@ -4514,12 +4515,7 @@ static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
 
        /* deliver to local stack */
        skb->protocol = eth_type_trans(skb, fast_rx->dev);
-       memset(skb->cb, 0, sizeof(skb->cb));
-       if (rx->list)
-               list_add_tail(&skb->list, rx->list);
-       else
-               netif_receive_skb(skb);
-
+       ieee80211_deliver_skb_to_local_stack(skb, rx);
 }
 
 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
index 8d9f4ff..e52cef7 100644 (file)
@@ -412,13 +412,14 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
                         * this function.
                         */
                        rc = mctp_key_add(key, msk);
-                       if (rc)
+                       if (rc) {
                                kfree(key);
+                       } else {
+                               trace_mctp_key_acquire(key);
 
-                       trace_mctp_key_acquire(key);
-
-                       /* we don't need to release key->lock on exit */
-                       mctp_key_unref(key);
+                               /* we don't need to release key->lock on exit */
+                               mctp_key_unref(key);
+                       }
                        key = NULL;
 
                } else {
index 48f75a5..d6fdc57 100644 (file)
@@ -1607,6 +1607,7 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct mpls_dev *mdev;
        unsigned int flags;
+       int err;
 
        if (event == NETDEV_REGISTER) {
                mdev = mpls_add_dev(dev);
@@ -1621,7 +1622,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                return NOTIFY_OK;
 
        switch (event) {
-               int err;
 
        case NETDEV_DOWN:
                err = mpls_ifdown(dev, event);
index 3240b72..7558802 100644 (file)
@@ -35,12 +35,14 @@ static const struct snmp_mib mptcp_snmp_list[] = {
        SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR),
        SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD),
        SNMP_MIB_ITEM("PortAdd", MPTCP_MIB_PORTADD),
+       SNMP_MIB_ITEM("AddAddrDrop", MPTCP_MIB_ADDADDRDROP),
        SNMP_MIB_ITEM("MPJoinPortSynRx", MPTCP_MIB_JOINPORTSYNRX),
        SNMP_MIB_ITEM("MPJoinPortSynAckRx", MPTCP_MIB_JOINPORTSYNACKRX),
        SNMP_MIB_ITEM("MPJoinPortAckRx", MPTCP_MIB_JOINPORTACKRX),
        SNMP_MIB_ITEM("MismatchPortSynRx", MPTCP_MIB_MISMATCHPORTSYNRX),
        SNMP_MIB_ITEM("MismatchPortAckRx", MPTCP_MIB_MISMATCHPORTACKRX),
        SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR),
+       SNMP_MIB_ITEM("RmAddrDrop", MPTCP_MIB_RMADDRDROP),
        SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
        SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX),
        SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX),
index ecd3d8b..2966fcb 100644 (file)
@@ -28,12 +28,14 @@ enum linux_mptcp_mib_field {
        MPTCP_MIB_ADDADDR,              /* Received ADD_ADDR with echo-flag=0 */
        MPTCP_MIB_ECHOADD,              /* Received ADD_ADDR with echo-flag=1 */
        MPTCP_MIB_PORTADD,              /* Received ADD_ADDR with a port-number */
+       MPTCP_MIB_ADDADDRDROP,          /* Dropped incoming ADD_ADDR */
        MPTCP_MIB_JOINPORTSYNRX,        /* Received a SYN MP_JOIN with a different port-number */
        MPTCP_MIB_JOINPORTSYNACKRX,     /* Received a SYNACK MP_JOIN with a different port-number */
        MPTCP_MIB_JOINPORTACKRX,        /* Received an ACK MP_JOIN with a different port-number */
        MPTCP_MIB_MISMATCHPORTSYNRX,    /* Received a SYN MP_JOIN with a mismatched port-number */
        MPTCP_MIB_MISMATCHPORTACKRX,    /* Received an ACK MP_JOIN with a mismatched port-number */
        MPTCP_MIB_RMADDR,               /* Received RM_ADDR */
+       MPTCP_MIB_RMADDRDROP,           /* Dropped incoming RM_ADDR */
        MPTCP_MIB_RMSUBFLOW,            /* Remove a subflow */
        MPTCP_MIB_MPPRIOTX,             /* Transmit a MP_PRIO */
        MPTCP_MIB_MPPRIORX,             /* Received a MP_PRIO */
index 696b2c4..7bea318 100644 (file)
@@ -213,6 +213,8 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
                mptcp_pm_add_addr_send_ack(msk);
        } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
                pm->remote = *addr;
+       } else {
+               __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
        }
 
        spin_unlock_bh(&pm->lock);
@@ -253,8 +255,10 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
                mptcp_event_addr_removed(msk, rm_list->ids[i]);
 
        spin_lock_bh(&pm->lock);
-       mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
-       pm->rm_list_rx = *rm_list;
+       if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
+               pm->rm_list_rx = *rm_list;
+       else
+               __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
        spin_unlock_bh(&pm->lock);
 }
 
index 782b1d4..4b5d795 100644 (file)
@@ -546,6 +546,16 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
        if (msk->pm.add_addr_signaled < add_addr_signal_max) {
                local = select_signal_address(pernet, msk);
 
+               /* due to racing events on both ends we can reach here while
+                * previous add address is still running: if we invoke now
+                * mptcp_pm_announce_addr(), that will fail and the
+                * corresponding id will be marked as used.
+                * Instead let the PM machinery reschedule us when the
+                * current address announce will be completed.
+                */
+               if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
+                       return;
+
                if (local) {
                        if (mptcp_pm_alloc_anno_list(msk, local)) {
                                __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
@@ -650,6 +660,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
        unsigned int add_addr_accept_max;
        struct mptcp_addr_info remote;
        unsigned int subflows_max;
+       bool reset_port = false;
        int i, nr;
 
        add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
@@ -659,15 +670,19 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
                 msk->pm.add_addr_accepted, add_addr_accept_max,
                 msk->pm.remote.family);
 
-       if (lookup_subflow_by_daddr(&msk->conn_list, &msk->pm.remote))
+       remote = msk->pm.remote;
+       if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
                goto add_addr_echo;
 
+       /* pick id 0 port, if none is provided the remote address */
+       if (!remote.port) {
+               reset_port = true;
+               remote.port = sk->sk_dport;
+       }
+
        /* connect to the specified remote address, using whatever
         * local address the routing configuration will pick.
         */
-       remote = msk->pm.remote;
-       if (!remote.port)
-               remote.port = sk->sk_dport;
        nr = fill_local_addresses_vec(msk, addrs);
 
        msk->pm.add_addr_accepted++;
@@ -680,8 +695,12 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
                __mptcp_subflow_connect(sk, &addrs[i], &remote);
        spin_lock_bh(&msk->pm.lock);
 
+       /* be sure to echo exactly the received address */
+       if (reset_port)
+               remote.port = 0;
+
 add_addr_echo:
-       mptcp_pm_announce_addr(msk, &msk->pm.remote, true);
+       mptcp_pm_announce_addr(msk, &remote, true);
        mptcp_pm_nl_addr_send_ack(msk);
 }
 
@@ -925,6 +944,7 @@ out:
 static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
                                            struct mptcp_pm_addr_entry *entry)
 {
+       int addrlen = sizeof(struct sockaddr_in);
        struct sockaddr_storage addr;
        struct mptcp_sock *msk;
        struct socket *ssock;
@@ -949,8 +969,11 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
        }
 
        mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
-       err = kernel_bind(ssock, (struct sockaddr *)&addr,
-                         sizeof(struct sockaddr_in));
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+       if (entry->addr.family == AF_INET6)
+               addrlen = sizeof(struct sockaddr_in6);
+#endif
+       err = kernel_bind(ssock, (struct sockaddr *)&addr, addrlen);
        if (err) {
                pr_warn("kernel_bind error, err=%d", err);
                goto out;
index f60f01b..1c72f25 100644 (file)
@@ -466,9 +466,12 @@ static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
 static void mptcp_set_datafin_timeout(const struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
+       u32 retransmits;
 
-       mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX,
-                                      TCP_RTO_MIN << icsk->icsk_retransmits);
+       retransmits = min_t(u32, icsk->icsk_retransmits,
+                           ilog2(TCP_RTO_MAX / TCP_RTO_MIN));
+
+       mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits;
 }
 
 static void __mptcp_set_timeout(struct sock *sk, long tout)
@@ -3294,6 +3297,17 @@ static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
                return 0;
 
        delta = msk->write_seq - v;
+       if (__mptcp_check_fallback(msk) && msk->first) {
+               struct tcp_sock *tp = tcp_sk(msk->first);
+
+               /* the first subflow is disconnected after close - see
+                * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq
+                * so ignore that status, too.
+                */
+               if (!((1 << msk->first->sk_state) &
+                     (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
+                       delta += READ_ONCE(tp->write_seq) - tp->snd_una;
+       }
        if (delta > INT_MAX)
                delta = INT_MAX;
 
index 354cb47..8a77a3f 100644 (file)
@@ -428,14 +428,15 @@ static int __nf_register_net_hook(struct net *net, int pf,
        p = nf_entry_dereference(*pp);
        new_hooks = nf_hook_entries_grow(p, reg);
 
-       if (!IS_ERR(new_hooks))
+       if (!IS_ERR(new_hooks)) {
+               hooks_validate(new_hooks);
                rcu_assign_pointer(*pp, new_hooks);
+       }
 
        mutex_unlock(&nf_hook_mutex);
        if (IS_ERR(new_hooks))
                return PTR_ERR(new_hooks);
 
-       hooks_validate(new_hooks);
 #ifdef CONFIG_NETFILTER_INGRESS
        if (nf_ingress_hook(reg, pf))
                net_inc_ingress_queue();
index ac43837..7032402 100644 (file)
@@ -2311,7 +2311,8 @@ ctnetlink_create_conntrack(struct net *net,
                        if (helper->from_nlattr)
                                helper->from_nlattr(helpinfo, ct);
 
-                       /* not in hash table yet so not strictly necessary */
+                       /* disable helper auto-assignment for this entry */
+                       ct->status |= IPS_HELPER;
                        RCU_INIT_POINTER(help->helper, helper);
                }
        } else {
index 2394238..5a93633 100644 (file)
@@ -489,6 +489,15 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
                        pr_debug("Setting vtag %x for dir %d\n",
                                 ih->init_tag, !dir);
                        ct->proto.sctp.vtag[!dir] = ih->init_tag;
+
+                       /* don't renew timeout on init retransmit so
+                        * port reuse by client or NAT middlebox cannot
+                        * keep entry alive indefinitely (incl. nat info).
+                        */
+                       if (new_state == SCTP_CONNTRACK_CLOSED &&
+                           old_state == SCTP_CONNTRACK_CLOSED &&
+                           nf_ct_is_confirmed(ct))
+                               ignore = true;
                }
 
                ct->proto.sctp.state = new_state;
index af5115e..d1582b8 100644 (file)
@@ -446,6 +446,32 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
        }
 }
 
+static void tcp_init_sender(struct ip_ct_tcp_state *sender,
+                           struct ip_ct_tcp_state *receiver,
+                           const struct sk_buff *skb,
+                           unsigned int dataoff,
+                           const struct tcphdr *tcph,
+                           u32 end, u32 win)
+{
+       /* SYN-ACK in reply to a SYN
+        * or SYN from reply direction in simultaneous open.
+        */
+       sender->td_end =
+       sender->td_maxend = end;
+       sender->td_maxwin = (win == 0 ? 1 : win);
+
+       tcp_options(skb, dataoff, tcph, sender);
+       /* RFC 1323:
+        * Both sides must send the Window Scale option
+        * to enable window scaling in either direction.
+        */
+       if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
+             receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
+               sender->td_scale = 0;
+               receiver->td_scale = 0;
+       }
+}
+
 static bool tcp_in_window(struct nf_conn *ct,
                          enum ip_conntrack_dir dir,
                          unsigned int index,
@@ -499,24 +525,9 @@ static bool tcp_in_window(struct nf_conn *ct,
                 * Initialize sender data.
                 */
                if (tcph->syn) {
-                       /*
-                        * SYN-ACK in reply to a SYN
-                        * or SYN from reply direction in simultaneous open.
-                        */
-                       sender->td_end =
-                       sender->td_maxend = end;
-                       sender->td_maxwin = (win == 0 ? 1 : win);
-
-                       tcp_options(skb, dataoff, tcph, sender);
-                       /*
-                        * RFC 1323:
-                        * Both sides must send the Window Scale option
-                        * to enable window scaling in either direction.
-                        */
-                       if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
-                             && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
-                               sender->td_scale =
-                               receiver->td_scale = 0;
+                       tcp_init_sender(sender, receiver,
+                                       skb, dataoff, tcph,
+                                       end, win);
                        if (!tcph->ack)
                                /* Simultaneous open */
                                return true;
@@ -560,6 +571,18 @@ static bool tcp_in_window(struct nf_conn *ct,
                sender->td_maxwin = (win == 0 ? 1 : win);
 
                tcp_options(skb, dataoff, tcph, sender);
+       } else if (tcph->syn && dir == IP_CT_DIR_REPLY &&
+                  state->state == TCP_CONNTRACK_SYN_SENT) {
+               /* Retransmitted syn-ack, or syn (simultaneous open).
+                *
+                * Re-init state for this direction, just like for the first
+                * syn(-ack) reply, it might differ in seq, ack or tcp options.
+                */
+               tcp_init_sender(sender, receiver,
+                               skb, dataoff, tcph,
+                               end, win);
+               if (!tcph->ack)
+                       return true;
        }
 
        if (!(tcph->ack)) {
index b561e0a..fc4265a 100644 (file)
@@ -110,7 +110,11 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
                nf_flow_rule_lwt_match(match, tun_info);
        }
 
-       key->meta.ingress_ifindex = tuple->iifidx;
+       if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_TC)
+               key->meta.ingress_ifindex = tuple->tc.iifidx;
+       else
+               key->meta.ingress_ifindex = tuple->iifidx;
+
        mask->meta.ingress_ifindex = 0xffffffff;
 
        if (tuple->encap_num > 0 && !(tuple->in_vlan_ingress & BIT(0)) &&
index 6d12afa..63d1516 100644 (file)
@@ -46,6 +46,15 @@ void nf_unregister_queue_handler(void)
 }
 EXPORT_SYMBOL(nf_unregister_queue_handler);
 
+static void nf_queue_sock_put(struct sock *sk)
+{
+#ifdef CONFIG_INET
+       sock_gen_put(sk);
+#else
+       sock_put(sk);
+#endif
+}
+
 static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 {
        struct nf_hook_state *state = &entry->state;
@@ -54,7 +63,7 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
        dev_put(state->in);
        dev_put(state->out);
        if (state->sk)
-               sock_put(state->sk);
+               nf_queue_sock_put(state->sk);
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        dev_put(entry->physin);
@@ -87,19 +96,21 @@ static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
 }
 
 /* Bump dev refs so they don't vanish while packet is out */
-void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
 {
        struct nf_hook_state *state = &entry->state;
 
+       if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
+               return false;
+
        dev_hold(state->in);
        dev_hold(state->out);
-       if (state->sk)
-               sock_hold(state->sk);
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        dev_hold(entry->physin);
        dev_hold(entry->physout);
 #endif
+       return true;
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
@@ -169,6 +180,18 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                break;
        }
 
+       if (skb_sk_is_prefetched(skb)) {
+               struct sock *sk = skb->sk;
+
+               if (!sk_is_refcounted(sk)) {
+                       if (!refcount_inc_not_zero(&sk->sk_refcnt))
+                               return -ENOTCONN;
+
+                       /* drop refcount on skb_orphan */
+                       skb->destructor = sock_edemux;
+               }
+       }
+
        entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
        if (!entry)
                return -ENOMEM;
@@ -187,7 +210,10 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 
        __nf_queue_entry_init_physdevs(entry);
 
-       nf_queue_entry_get_refs(entry);
+       if (!nf_queue_entry_get_refs(entry)) {
+               kfree(entry);
+               return -ENOTCONN;
+       }
 
        switch (entry->state.pf) {
        case AF_INET:
index 5fa1699..c86748b 100644 (file)
@@ -4502,7 +4502,7 @@ static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
        list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
                list_del_rcu(&catchall->list);
                nft_set_elem_destroy(set, catchall->elem, true);
-               kfree_rcu(catchall);
+               kfree_rcu(catchall, rcu);
        }
 }
 
@@ -5669,7 +5669,7 @@ static void nft_setelem_catchall_remove(const struct net *net,
        list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
                if (catchall->elem == elem->priv) {
                        list_del_rcu(&catchall->list);
-                       kfree_rcu(catchall);
+                       kfree_rcu(catchall, rcu);
                        break;
                }
        }
@@ -6551,12 +6551,15 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
 {
        struct nft_object *newobj;
        struct nft_trans *trans;
-       int err;
+       int err = -ENOMEM;
+
+       if (!try_module_get(type->owner))
+               return -ENOENT;
 
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
                                sizeof(struct nft_trans_obj));
        if (!trans)
-               return -ENOMEM;
+               goto err_trans;
 
        newobj = nft_obj_init(ctx, type, attr);
        if (IS_ERR(newobj)) {
@@ -6573,6 +6576,8 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
 
 err_free_trans:
        kfree(trans);
+err_trans:
+       module_put(type->owner);
        return err;
 }
 
@@ -8185,7 +8190,7 @@ static void nft_obj_commit_update(struct nft_trans *trans)
        if (obj->ops->update)
                obj->ops->update(obj, newobj);
 
-       kfree(newobj);
+       nft_obj_destroy(&trans->ctx, newobj);
 }
 
 static void nft_commit_release(struct nft_trans *trans)
@@ -8976,7 +8981,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                        break;
                case NFT_MSG_NEWOBJ:
                        if (nft_trans_obj_update(trans)) {
-                               kfree(nft_trans_obj_newobj(trans));
+                               nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
                                nft_trans_destroy(trans);
                        } else {
                                trans->ctx.table->use--;
@@ -9636,10 +9641,13 @@ EXPORT_SYMBOL_GPL(__nft_release_basechain);
 
 static void __nft_release_hook(struct net *net, struct nft_table *table)
 {
+       struct nft_flowtable *flowtable;
        struct nft_chain *chain;
 
        list_for_each_entry(chain, &table->chains, list)
                nf_tables_unregister_hook(net, table, chain);
+       list_for_each_entry(flowtable, &table->flowtables, list)
+               nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
 }
 
 static void __nft_release_hooks(struct net *net)
index 9656c16..2d36952 100644 (file)
@@ -94,7 +94,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
 
        expr = nft_expr_first(rule);
        while (nft_expr_more(rule, expr)) {
-               if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
+               if (expr->ops->offload_action &&
+                   expr->ops->offload_action(expr))
                        num_actions++;
 
                expr = nft_expr_next(expr);
index ea2d9c2..64a6acb 100644 (file)
@@ -710,9 +710,15 @@ static struct nf_queue_entry *
 nf_queue_entry_dup(struct nf_queue_entry *e)
 {
        struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
-       if (entry)
-               nf_queue_entry_get_refs(entry);
-       return entry;
+
+       if (!entry)
+               return NULL;
+
+       if (nf_queue_entry_get_refs(entry))
+               return entry;
+
+       kfree(entry);
+       return NULL;
 }
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
index bbf3fcb..5b5c607 100644 (file)
@@ -67,6 +67,11 @@ static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx,
        return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif);
 }
 
+static bool nft_dup_netdev_offload_action(const struct nft_expr *expr)
+{
+       return true;
+}
+
 static struct nft_expr_type nft_dup_netdev_type;
 static const struct nft_expr_ops nft_dup_netdev_ops = {
        .type           = &nft_dup_netdev_type,
@@ -75,6 +80,7 @@ static const struct nft_expr_ops nft_dup_netdev_ops = {
        .init           = nft_dup_netdev_init,
        .dump           = nft_dup_netdev_dump,
        .offload        = nft_dup_netdev_offload,
+       .offload_action = nft_dup_netdev_offload_action,
 };
 
 static struct nft_expr_type nft_dup_netdev_type __read_mostly = {
index dbe1f2e..9e927ab 100644 (file)
@@ -167,7 +167,7 @@ nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
 {
        struct tcphdr *tcph;
 
-       if (pkt->tprot != IPPROTO_TCP)
+       if (pkt->tprot != IPPROTO_TCP || pkt->fragoff)
                return NULL;
 
        tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer);
index fa9301c..619e394 100644 (file)
@@ -79,6 +79,11 @@ static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx,
        return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif);
 }
 
+static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr)
+{
+       return true;
+}
+
 struct nft_fwd_neigh {
        u8                      sreg_dev;
        u8                      sreg_addr;
@@ -222,6 +227,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
        .dump           = nft_fwd_netdev_dump,
        .validate       = nft_fwd_validate,
        .offload        = nft_fwd_netdev_offload,
+       .offload_action = nft_fwd_netdev_offload_action,
 };
 
 static const struct nft_expr_ops *
index 90c64d2..d0f67d3 100644 (file)
@@ -213,6 +213,16 @@ static int nft_immediate_offload(struct nft_offload_ctx *ctx,
        return 0;
 }
 
+static bool nft_immediate_offload_action(const struct nft_expr *expr)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+       if (priv->dreg == NFT_REG_VERDICT)
+               return true;
+
+       return false;
+}
+
 static const struct nft_expr_ops nft_imm_ops = {
        .type           = &nft_imm_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
@@ -224,7 +234,7 @@ static const struct nft_expr_ops nft_imm_ops = {
        .dump           = nft_immediate_dump,
        .validate       = nft_immediate_validate,
        .offload        = nft_immediate_offload,
-       .offload_flags  = NFT_OFFLOAD_F_ACTION,
+       .offload_action = nft_immediate_offload_action,
 };
 
 struct nft_expr_type nft_imm_type __read_mostly = {
index c4f3084..a726b62 100644 (file)
@@ -340,11 +340,20 @@ static int nft_limit_obj_pkts_dump(struct sk_buff *skb,
        return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
 }
 
+static void nft_limit_obj_pkts_destroy(const struct nft_ctx *ctx,
+                                      struct nft_object *obj)
+{
+       struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
+
+       nft_limit_destroy(ctx, &priv->limit);
+}
+
 static struct nft_object_type nft_limit_obj_type;
 static const struct nft_object_ops nft_limit_obj_pkts_ops = {
        .type           = &nft_limit_obj_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv_pkts)),
        .init           = nft_limit_obj_pkts_init,
+       .destroy        = nft_limit_obj_pkts_destroy,
        .eval           = nft_limit_obj_pkts_eval,
        .dump           = nft_limit_obj_pkts_dump,
 };
@@ -378,11 +387,20 @@ static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
        return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
 }
 
+static void nft_limit_obj_bytes_destroy(const struct nft_ctx *ctx,
+                                       struct nft_object *obj)
+{
+       struct nft_limit_priv *priv = nft_obj_data(obj);
+
+       nft_limit_destroy(ctx, priv);
+}
+
 static struct nft_object_type nft_limit_obj_type;
 static const struct nft_object_ops nft_limit_obj_bytes_ops = {
        .type           = &nft_limit_obj_type,
        .size           = sizeof(struct nft_limit_priv),
        .init           = nft_limit_obj_bytes_init,
+       .destroy        = nft_limit_obj_bytes_destroy,
        .eval           = nft_limit_obj_bytes_eval,
        .dump           = nft_limit_obj_bytes_dump,
 };
index 940fed9..5cc06ae 100644 (file)
@@ -83,7 +83,7 @@ static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
 {
        unsigned int thoff = nft_thoff(pkt);
 
-       if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+       if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
                return -1;
 
        switch (pkt->tprot) {
@@ -147,7 +147,7 @@ void nft_payload_eval(const struct nft_expr *expr,
                offset = skb_network_offset(skb);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
-               if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+               if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
                        goto err;
                offset = nft_thoff(pkt);
                break;
@@ -688,7 +688,7 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
                offset = skb_network_offset(skb);
                break;
        case NFT_PAYLOAD_TRANSPORT_HEADER:
-               if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+               if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
                        goto err;
                offset = nft_thoff(pkt);
                break;
@@ -728,7 +728,8 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
        if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
            pkt->tprot == IPPROTO_SCTP &&
            skb->ip_summed != CHECKSUM_PARTIAL) {
-               if (nft_payload_csum_sctp(skb, nft_thoff(pkt)))
+               if (pkt->fragoff == 0 &&
+                   nft_payload_csum_sctp(skb, nft_thoff(pkt)))
                        goto err;
        }
 
index a0109fa..1133e06 100644 (file)
@@ -191,8 +191,10 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
                if (err)
                        goto nf_ct_failure;
                err = nf_synproxy_ipv6_init(snet, ctx->net);
-               if (err)
+               if (err) {
+                       nf_synproxy_ipv4_fini(snet, ctx->net);
                        goto nf_ct_failure;
+               }
                break;
        }
 
index 5e6459e..7013f55 100644 (file)
@@ -220,8 +220,10 @@ static void socket_mt_destroy(const struct xt_mtdtor_param *par)
 {
        if (par->family == NFPROTO_IPV4)
                nf_defrag_ipv4_disable(par->net);
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        else if (par->family == NFPROTO_IPV6)
-               nf_defrag_ipv4_disable(par->net);
+               nf_defrag_ipv6_disable(par->net);
+#endif
 }
 
 static struct xt_match socket_mt_reg[] __read_mostly = {
index 0767740..780d9e2 100644 (file)
@@ -423,12 +423,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
        memcpy(addr, new_addr, sizeof(__be32[4]));
 }
 
-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
+static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
 {
+       u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
+
+       ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
+
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
+                            (__force __wsum)(ipv6_tclass << 12));
+
+       ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
+}
+
+static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
+{
+       u32 ofl;
+
+       ofl = nh->flow_lbl[0] << 16 |  nh->flow_lbl[1] << 8 |  nh->flow_lbl[2];
+       fl = OVS_MASKED(ofl, fl, mask);
+
        /* Bits 21-24 are always unmasked, so this retains their values. */
-       OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
-       OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
-       OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
+       nh->flow_lbl[0] = (u8)(fl >> 16);
+       nh->flow_lbl[1] = (u8)(fl >> 8);
+       nh->flow_lbl[2] = (u8)fl;
+
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
+}
+
+static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
+{
+       new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
+
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
+                            (__force __wsum)(new_ttl << 8));
+       nh->hop_limit = new_ttl;
 }
 
 static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
@@ -546,18 +577,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                }
        }
        if (mask->ipv6_tclass) {
-               ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
+               set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
                flow_key->ip.tos = ipv6_get_dsfield(nh);
        }
        if (mask->ipv6_label) {
-               set_ipv6_fl(nh, ntohl(key->ipv6_label),
+               set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
                            ntohl(mask->ipv6_label));
                flow_key->ipv6.label =
                    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
        }
        if (mask->ipv6_hlimit) {
-               OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
-                              mask->ipv6_hlimit);
+               set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
                flow_key->ip.ttl = nh->hop_limit;
        }
        return 0;
index 32563ce..ca03e72 100644 (file)
@@ -274,7 +274,7 @@ static int tcf_action_offload_add_ex(struct tc_action *action,
        err = tc_setup_action(&fl_action->action, actions);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack,
-                                  "Failed to setup tc actions for offload\n");
+                                  "Failed to setup tc actions for offload");
                goto fl_err;
        }
 
@@ -1037,6 +1037,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
 restart_act_graph:
        for (i = 0; i < nr_actions; i++) {
                const struct tc_action *a = actions[i];
+               int repeat_ttl;
 
                if (jmp_prgcnt > 0) {
                        jmp_prgcnt -= 1;
@@ -1045,11 +1046,17 @@ restart_act_graph:
 
                if (tc_act_skip_sw(a->tcfa_flags))
                        continue;
+
+               repeat_ttl = 32;
 repeat:
                ret = a->ops->act(skb, a, res);
-               if (ret == TC_ACT_REPEAT)
-                       goto repeat;    /* we need a ttl - JHS */
-
+               if (unlikely(ret == TC_ACT_REPEAT)) {
+                       if (--repeat_ttl != 0)
+                               goto repeat;
+                       /* suspicious opcode, stop pipeline */
+                       net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
+                       return TC_ACT_OK;
+               }
                if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
                        jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
                        if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
index f99247f..ec19f62 100644 (file)
@@ -361,6 +361,13 @@ static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
        }
 }
 
+static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
+                                struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
+{
+       entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
+       entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
+}
+
 static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
                                  struct nf_conn *ct,
                                  bool tcp)
@@ -385,10 +392,8 @@ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
 
        act_ct_ext = nf_conn_act_ct_ext_find(ct);
        if (act_ct_ext) {
-               entry->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
-                       act_ct_ext->ifindex[IP_CT_DIR_ORIGINAL];
-               entry->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
-                       act_ct_ext->ifindex[IP_CT_DIR_REPLY];
+               tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
+               tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
        }
 
        err = flow_offload_add(&ct_ft->nf_ft, entry);
@@ -533,11 +538,6 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
        struct nf_conn *ct;
        u8 dir;
 
-       /* Previously seen or loopback */
-       ct = nf_ct_get(skb, &ctinfo);
-       if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
-               return false;
-
        switch (family) {
        case NFPROTO_IPV4:
                if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
index 5f0f346..5ce1208 100644 (file)
@@ -1044,7 +1044,7 @@ static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
 
        /* Find qdisc */
        if (!*parent) {
-               *q = dev->qdisc;
+               *q = rcu_dereference(dev->qdisc);
                *parent = (*q)->handle;
        } else {
                *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
@@ -2587,7 +2587,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 
                parent = tcm->tcm_parent;
                if (!parent)
-                       q = dev->qdisc;
+                       q = rtnl_dereference(dev->qdisc);
                else
                        q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
                if (!q)
@@ -2962,7 +2962,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
                        return skb->len;
 
                if (!tcm->tcm_parent)
-                       q = dev->qdisc;
+                       q = rtnl_dereference(dev->qdisc);
                else
                        q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
 
index 179825a..e3c0e8e 100644 (file)
@@ -301,7 +301,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 
        if (!handle)
                return NULL;
-       q = qdisc_match_from_root(dev->qdisc, handle);
+       q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
        if (q)
                goto out;
 
@@ -320,7 +320,7 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
 
        if (!handle)
                return NULL;
-       q = qdisc_match_from_root(dev->qdisc, handle);
+       q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
        if (q)
                goto out;
 
@@ -1082,10 +1082,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 skip:
                if (!ingress) {
                        notify_and_destroy(net, skb, n, classid,
-                                          dev->qdisc, new);
+                                          rtnl_dereference(dev->qdisc), new);
                        if (new && !new->ops->attach)
                                qdisc_refcount_inc(new);
-                       dev->qdisc = new ? : &noop_qdisc;
+                       rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
 
                        if (new && new->ops->attach)
                                new->ops->attach(new);
@@ -1451,7 +1451,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                                q = dev_ingress_queue(dev)->qdisc_sleeping;
                        }
                } else {
-                       q = dev->qdisc;
+                       q = rtnl_dereference(dev->qdisc);
                }
                if (!q) {
                        NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
@@ -1540,7 +1540,7 @@ replay:
                                q = dev_ingress_queue(dev)->qdisc_sleeping;
                        }
                } else {
-                       q = dev->qdisc;
+                       q = rtnl_dereference(dev->qdisc);
                }
 
                /* It may be default qdisc, ignore it */
@@ -1762,7 +1762,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
                        s_q_idx = 0;
                q_idx = 0;
 
-               if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
+               if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
+                                      skb, cb, &q_idx, s_q_idx,
                                       true, tca[TCA_DUMP_INVISIBLE]) < 0)
                        goto done;
 
@@ -2033,7 +2034,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
                } else if (qid1) {
                        qid = qid1;
                } else if (qid == 0)
-                       qid = dev->qdisc->handle;
+                       qid = rtnl_dereference(dev->qdisc)->handle;
 
                /* Now qid is genuine qdisc handle consistent
                 * both with parent and child.
@@ -2044,7 +2045,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
                        portid = TC_H_MAKE(qid, portid);
        } else {
                if (qid == 0)
-                       qid = dev->qdisc->handle;
+                       qid = rtnl_dereference(dev->qdisc)->handle;
        }
 
        /* OK. Locate qdisc */
@@ -2205,7 +2206,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
        s_t = cb->args[0];
        t = 0;
 
-       if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
+       if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
+                               skb, tcm, cb, &t, s_t, true) < 0)
                goto done;
 
        dev_queue = dev_ingress_queue(dev);
index f893d9a..5bab9f8 100644 (file)
@@ -1164,30 +1164,33 @@ static void attach_default_qdiscs(struct net_device *dev)
        if (!netif_is_multiqueue(dev) ||
            dev->priv_flags & IFF_NO_QUEUE) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-               dev->qdisc = txq->qdisc_sleeping;
-               qdisc_refcount_inc(dev->qdisc);
+               qdisc = txq->qdisc_sleeping;
+               rcu_assign_pointer(dev->qdisc, qdisc);
+               qdisc_refcount_inc(qdisc);
        } else {
                qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
                if (qdisc) {
-                       dev->qdisc = qdisc;
+                       rcu_assign_pointer(dev->qdisc, qdisc);
                        qdisc->ops->attach(qdisc);
                }
        }
+       qdisc = rtnl_dereference(dev->qdisc);
 
        /* Detect default qdisc setup/init failed and fallback to "noqueue" */
-       if (dev->qdisc == &noop_qdisc) {
+       if (qdisc == &noop_qdisc) {
                netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
                            default_qdisc_ops->id, noqueue_qdisc_ops.id);
                dev->priv_flags |= IFF_NO_QUEUE;
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-               dev->qdisc = txq->qdisc_sleeping;
-               qdisc_refcount_inc(dev->qdisc);
+               qdisc = txq->qdisc_sleeping;
+               rcu_assign_pointer(dev->qdisc, qdisc);
+               qdisc_refcount_inc(qdisc);
                dev->priv_flags ^= IFF_NO_QUEUE;
        }
 
 #ifdef CONFIG_NET_SCHED
-       if (dev->qdisc != &noop_qdisc)
-               qdisc_hash_add(dev->qdisc, false);
+       if (qdisc != &noop_qdisc)
+               qdisc_hash_add(qdisc, false);
 #endif
 }
 
@@ -1217,7 +1220,7 @@ void dev_activate(struct net_device *dev)
         * and noqueue_qdisc for virtual interfaces
         */
 
-       if (dev->qdisc == &noop_qdisc)
+       if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
                attach_default_qdiscs(dev);
 
        if (!netif_carrier_ok(dev))
@@ -1383,7 +1386,7 @@ static int qdisc_change_tx_queue_len(struct net_device *dev,
 void dev_qdisc_change_real_num_tx(struct net_device *dev,
                                  unsigned int new_real_tx)
 {
-       struct Qdisc *qdisc = dev->qdisc;
+       struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
 
        if (qdisc->ops->change_real_num_tx)
                qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
@@ -1447,7 +1450,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 
 void dev_init_scheduler(struct net_device *dev)
 {
-       dev->qdisc = &noop_qdisc;
+       rcu_assign_pointer(dev->qdisc, &noop_qdisc);
        netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
@@ -1475,8 +1478,8 @@ void dev_shutdown(struct net_device *dev)
        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
-       qdisc_put(dev->qdisc);
-       dev->qdisc = &noop_qdisc;
+       qdisc_put(rtnl_dereference(dev->qdisc));
+       rcu_assign_pointer(dev->qdisc, &noop_qdisc);
 
        WARN_ON(timer_pending(&dev->watchdog_timer));
 }
index 8c89d0b..284befa 100644 (file)
@@ -183,7 +183,7 @@ static int smc_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct smc_sock *smc;
-       int rc = 0;
+       int old_state, rc = 0;
 
        if (!sk)
                goto out;
@@ -191,8 +191,10 @@ static int smc_release(struct socket *sock)
        sock_hold(sk); /* sock_put below */
        smc = smc_sk(sk);
 
+       old_state = sk->sk_state;
+
        /* cleanup for a dangling non-blocking connect */
-       if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
+       if (smc->connect_nonblock && old_state == SMC_INIT)
                tcp_abort(smc->clcsock->sk, ECONNABORTED);
 
        if (cancel_work_sync(&smc->connect_work))
@@ -206,6 +208,10 @@ static int smc_release(struct socket *sock)
        else
                lock_sock(sk);
 
+       if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE &&
+           !smc->use_fallback)
+               smc_close_active_abort(smc);
+
        rc = __smc_release(smc);
 
        /* detach socket */
@@ -667,14 +673,17 @@ static void smc_fback_error_report(struct sock *clcsk)
 static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
 {
        struct sock *clcsk;
+       int rc = 0;
 
        mutex_lock(&smc->clcsock_release_lock);
        if (!smc->clcsock) {
-               mutex_unlock(&smc->clcsock_release_lock);
-               return -EBADF;
+               rc = -EBADF;
+               goto out;
        }
        clcsk = smc->clcsock->sk;
 
+       if (smc->use_fallback)
+               goto out;
        smc->use_fallback = true;
        smc->fallback_rsn = reason_code;
        smc_stat_fallback(smc);
@@ -702,8 +711,9 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
                smc->clcsock->sk->sk_user_data =
                        (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
        }
+out:
        mutex_unlock(&smc->clcsock_release_lock);
-       return 0;
+       return rc;
 }
 
 /* fall back during connect */
@@ -3077,12 +3087,14 @@ static int __init smc_init(void)
        rc = tcp_register_ulp(&smc_ulp_ops);
        if (rc) {
                pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
-               goto out_sock;
+               goto out_ib;
        }
 
        static_branch_enable(&tcp_have_smc);
        return 0;
 
+out_ib:
+       smc_ib_unregister_client();
 out_sock:
        sock_unregister(PF_SMC);
 out_proto6:
index 29525d0..be7d704 100644 (file)
@@ -1161,8 +1161,8 @@ void smc_conn_free(struct smc_connection *conn)
                        cancel_work_sync(&conn->abort_work);
        }
        if (!list_empty(&lgr->list)) {
-               smc_lgr_unregister_conn(conn);
                smc_buf_unuse(conn, lgr); /* allow buffer reuse */
+               smc_lgr_unregister_conn(conn);
        }
 
        if (!lgr->conns_num)
@@ -1864,7 +1864,8 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
                    (ini->smcd_version == SMC_V2 ||
                     lgr->vlan_id == ini->vlan_id) &&
                    (role == SMC_CLNT || ini->is_smcd ||
-                    lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
+                   (lgr->conns_num < SMC_RMBS_PER_LGR_MAX &&
+                     !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) {
                        /* link group found */
                        ini->first_contact_local = 0;
                        conn->lgr = lgr;
index 291f148..29f0a55 100644 (file)
@@ -113,7 +113,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
        pnettable = &sn->pnettable;
 
        /* remove table entry */
-       write_lock(&pnettable->lock);
+       mutex_lock(&pnettable->lock);
        list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist,
                                 list) {
                if (!pnet_name ||
@@ -131,7 +131,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
                        rc = 0;
                }
        }
-       write_unlock(&pnettable->lock);
+       mutex_unlock(&pnettable->lock);
 
        /* if this is not the initial namespace, stop here */
        if (net != &init_net)
@@ -192,7 +192,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
        sn = net_generic(net, smc_net_id);
        pnettable = &sn->pnettable;
 
-       write_lock(&pnettable->lock);
+       mutex_lock(&pnettable->lock);
        list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
                if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev &&
                    !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) {
@@ -206,7 +206,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
                        break;
                }
        }
-       write_unlock(&pnettable->lock);
+       mutex_unlock(&pnettable->lock);
        return rc;
 }
 
@@ -224,7 +224,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
        sn = net_generic(net, smc_net_id);
        pnettable = &sn->pnettable;
 
-       write_lock(&pnettable->lock);
+       mutex_lock(&pnettable->lock);
        list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
                if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) {
                        dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker);
@@ -237,7 +237,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
                        break;
                }
        }
-       write_unlock(&pnettable->lock);
+       mutex_unlock(&pnettable->lock);
        return rc;
 }
 
@@ -368,12 +368,9 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
        new_pe->type = SMC_PNET_ETH;
        memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN);
        strncpy(new_pe->eth_name, eth_name, IFNAMSIZ);
-       new_pe->ndev = ndev;
-       if (ndev)
-               netdev_tracker_alloc(ndev, &new_pe->dev_tracker, GFP_KERNEL);
        rc = -EEXIST;
        new_netdev = true;
-       write_lock(&pnettable->lock);
+       mutex_lock(&pnettable->lock);
        list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
                if (tmp_pe->type == SMC_PNET_ETH &&
                    !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) {
@@ -382,10 +379,15 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
                }
        }
        if (new_netdev) {
+               if (ndev) {
+                       new_pe->ndev = ndev;
+                       netdev_tracker_alloc(ndev, &new_pe->dev_tracker,
+                                            GFP_ATOMIC);
+               }
                list_add_tail(&new_pe->list, &pnettable->pnetlist);
-               write_unlock(&pnettable->lock);
+               mutex_unlock(&pnettable->lock);
        } else {
-               write_unlock(&pnettable->lock);
+               mutex_unlock(&pnettable->lock);
                kfree(new_pe);
                goto out_put;
        }
@@ -446,7 +448,7 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
        new_pe->ib_port = ib_port;
 
        new_ibdev = true;
-       write_lock(&pnettable->lock);
+       mutex_lock(&pnettable->lock);
        list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
                if (tmp_pe->type == SMC_PNET_IB &&
                    !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
@@ -456,9 +458,9 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
        }
        if (new_ibdev) {
                list_add_tail(&new_pe->list, &pnettable->pnetlist);
-               write_unlock(&pnettable->lock);
+               mutex_unlock(&pnettable->lock);
        } else {
-               write_unlock(&pnettable->lock);
+               mutex_unlock(&pnettable->lock);
                kfree(new_pe);
        }
        return (new_ibdev) ? 0 : -EEXIST;
@@ -603,7 +605,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
        pnettable = &sn->pnettable;
 
        /* dump pnettable entries */
-       read_lock(&pnettable->lock);
+       mutex_lock(&pnettable->lock);
        list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
                if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid))
                        continue;
@@ -618,7 +620,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
                        break;
                }
        }
-       read_unlock(&pnettable->lock);
+       mutex_unlock(&pnettable->lock);
        return idx;
 }
 
@@ -862,7 +864,7 @@ int smc_pnet_net_init(struct net *net)
        struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev;
 
        INIT_LIST_HEAD(&pnettable->pnetlist);
-       rwlock_init(&pnettable->lock);
+       mutex_init(&pnettable->lock);
        INIT_LIST_HEAD(&pnetids_ndev->list);
        rwlock_init(&pnetids_ndev->lock);
 
@@ -942,7 +944,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
        sn = net_generic(net, smc_net_id);
        pnettable = &sn->pnettable;
 
-       read_lock(&pnettable->lock);
+       mutex_lock(&pnettable->lock);
        list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
                if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) {
                        /* get pnetid of netdev device */
@@ -951,7 +953,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
                        break;
                }
        }
-       read_unlock(&pnettable->lock);
+       mutex_unlock(&pnettable->lock);
        return rc;
 }
 
@@ -1154,7 +1156,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
        sn = net_generic(&init_net, smc_net_id);
        pnettable = &sn->pnettable;
 
-       read_lock(&pnettable->lock);
+       mutex_lock(&pnettable->lock);
        list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
                if (tmp_pe->type == SMC_PNET_IB &&
                    !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) &&
@@ -1164,7 +1166,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
                        break;
                }
        }
-       read_unlock(&pnettable->lock);
+       mutex_unlock(&pnettable->lock);
 
        return rc;
 }
@@ -1183,7 +1185,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
        sn = net_generic(&init_net, smc_net_id);
        pnettable = &sn->pnettable;
 
-       read_lock(&pnettable->lock);
+       mutex_lock(&pnettable->lock);
        list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
                if (tmp_pe->type == SMC_PNET_IB &&
                    !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
@@ -1192,7 +1194,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
                        break;
                }
        }
-       read_unlock(&pnettable->lock);
+       mutex_unlock(&pnettable->lock);
 
        return rc;
 }
index 1403927..80a88ee 100644 (file)
@@ -29,7 +29,7 @@ struct smc_link_group;
  * @pnetlist: List of PNETIDs
  */
 struct smc_pnettable {
-       rwlock_t lock;
+       struct mutex lock;
        struct list_head pnetlist;
 };
 
index 50cf757..982eeca 100644 (file)
@@ -3448,7 +3448,7 @@ EXPORT_SYMBOL(kernel_connect);
  *     @addr: address holder
  *
  *     Fills the @addr pointer with the address which the socket is bound.
- *     Returns 0 or an error code.
+ *     Returns the length of the address in bytes or an error code.
  */
 
 int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
@@ -3463,7 +3463,7 @@ EXPORT_SYMBOL(kernel_getsockname);
  *     @addr: address holder
  *
  *     Fills the @addr pointer with the address which the socket is connected.
- *     Returns 0 or an error code.
+ *     Returns the length of the address in bytes or an error code.
  */
 
 int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
index b64a028..05c758d 100644 (file)
@@ -115,11 +115,14 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj,
        }
 
        sock = container_of(xprt, struct sock_xprt, xprt);
-       if (kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0)
+       mutex_lock(&sock->recv_mutex);
+       if (sock->sock == NULL ||
+           kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0)
                goto out;
 
        ret = sprintf(buf, "%pISc\n", &saddr);
 out:
+       mutex_unlock(&sock->recv_mutex);
        xprt_put(xprt);
        return ret + 1;
 }
index f172d12..7b5fce2 100644 (file)
@@ -413,6 +413,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
                                              IB_POLL_WORKQUEUE);
        if (IS_ERR(ep->re_attr.send_cq)) {
                rc = PTR_ERR(ep->re_attr.send_cq);
+               ep->re_attr.send_cq = NULL;
                goto out_destroy;
        }
 
@@ -421,6 +422,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
                                              IB_POLL_WORKQUEUE);
        if (IS_ERR(ep->re_attr.recv_cq)) {
                rc = PTR_ERR(ep->re_attr.recv_cq);
+               ep->re_attr.recv_cq = NULL;
                goto out_destroy;
        }
        ep->re_receive_count = 0;
@@ -459,6 +461,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
        ep->re_pd = ib_alloc_pd(device, 0);
        if (IS_ERR(ep->re_pd)) {
                rc = PTR_ERR(ep->re_pd);
+               ep->re_pd = NULL;
                goto out_destroy;
        }
 
index 69b6ee5..0f39e08 100644 (file)
@@ -1641,7 +1641,12 @@ static int xs_get_srcport(struct sock_xprt *transport)
 unsigned short get_srcport(struct rpc_xprt *xprt)
 {
        struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
-       return xs_sock_getport(sock->sock);
+       unsigned short ret = 0;
+       mutex_lock(&sock->recv_mutex);
+       if (sock->sock)
+               ret = xs_sock_getport(sock->sock);
+       mutex_unlock(&sock->recv_mutex);
+       return ret;
 }
 EXPORT_SYMBOL(get_srcport);
 
index 9325479..f09316a 100644 (file)
@@ -2276,7 +2276,7 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
        struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
        struct tipc_aead_key *skey = NULL;
        u16 key_gen = msg_key_gen(hdr);
-       u16 size = msg_data_sz(hdr);
+       u32 size = msg_data_sz(hdr);
        u8 *data = msg_data(hdr);
        unsigned int keylen;
 
index 8d9e09f..1e14d7f 100644 (file)
@@ -2200,7 +2200,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        struct tipc_msg *hdr = buf_msg(skb);
        struct tipc_gap_ack_blks *ga = NULL;
        bool reply = msg_probe(hdr), retransmitted = false;
-       u16 dlen = msg_data_sz(hdr), glen = 0;
+       u32 dlen = msg_data_sz(hdr), glen = 0;
        u16 peers_snd_nxt =  msg_next_sent(hdr);
        u16 peers_tol = msg_link_tolerance(hdr);
        u16 peers_prio = msg_linkprio(hdr);
@@ -2214,6 +2214,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        void *data;
 
        trace_tipc_proto_rcv(skb, false, l->name);
+
+       if (dlen > U16_MAX)
+               goto exit;
+
        if (tipc_link_is_blocked(l) || !xmitq)
                goto exit;
 
@@ -2309,7 +2313,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 
                /* Receive Gap ACK blocks from peer if any */
                glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
-
+               if(glen > dlen)
+                       break;
                tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
                             &l->mon_state, l->bearer_id);
 
index 4076196..2f4d232 100644 (file)
@@ -496,6 +496,8 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
        state->probing = false;
 
        /* Sanity check received domain record */
+       if (new_member_cnt > MAX_MON_DOMAIN)
+               return;
        if (dlen < dom_rec_len(arrv_dom, 0))
                return;
        if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
index bda902c..8267b75 100644 (file)
@@ -313,7 +313,7 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
                pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
                                    ua.sr.type, ua.sr.lower, node);
        } else {
-               pr_warn("Unrecognized name table message received\n");
+               pr_warn_ratelimited("Unknown name table message received\n");
        }
        return false;
 }
index 01396dd..1d8ba23 100644 (file)
@@ -967,7 +967,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
                list_for_each_entry(p, &sr->all_publ, all_publ)
                        if (p->key == *last_key)
                                break;
-               if (p->key != *last_key)
+               if (list_entry_is_head(p, &sr->all_publ, all_publ))
                        return -EPIPE;
        } else {
                p = list_first_entry(&sr->all_publ,
index 9947b7d..6ef95ce 100644 (file)
@@ -403,7 +403,7 @@ static void tipc_node_write_unlock(struct tipc_node *n)
        u32 flags = n->action_flags;
        struct list_head *publ_list;
        struct tipc_uaddr ua;
-       u32 bearer_id;
+       u32 bearer_id, node;
 
        if (likely(!flags)) {
                write_unlock_bh(&n->lock);
@@ -413,7 +413,8 @@ static void tipc_node_write_unlock(struct tipc_node *n)
        tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
                   TIPC_LINK_STATE, n->addr, n->addr);
        sk.ref = n->link_id;
-       sk.node = n->addr;
+       sk.node = tipc_own_addr(net);
+       node = n->addr;
        bearer_id = n->link_id & 0xffff;
        publ_list = &n->publ_list;
 
@@ -423,17 +424,17 @@ static void tipc_node_write_unlock(struct tipc_node *n)
        write_unlock_bh(&n->lock);
 
        if (flags & TIPC_NOTIFY_NODE_DOWN)
-               tipc_publ_notify(net, publ_list, sk.node, n->capabilities);
+               tipc_publ_notify(net, publ_list, node, n->capabilities);
 
        if (flags & TIPC_NOTIFY_NODE_UP)
-               tipc_named_node_up(net, sk.node, n->capabilities);
+               tipc_named_node_up(net, node, n->capabilities);
 
        if (flags & TIPC_NOTIFY_LINK_UP) {
-               tipc_mon_peer_up(net, sk.node, bearer_id);
+               tipc_mon_peer_up(net, node, bearer_id);
                tipc_nametbl_publish(net, &ua, &sk, sk.ref);
        }
        if (flags & TIPC_NOTIFY_LINK_DOWN) {
-               tipc_mon_peer_down(net, sk.node, bearer_id);
+               tipc_mon_peer_down(net, node, bearer_id);
                tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
        }
 }
index 3e63c83..7545321 100644 (file)
@@ -3749,7 +3749,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
                        if (p->key == *last_publ)
                                break;
                }
-               if (p->key != *last_publ) {
+               if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
                        /* We never set seq or call nl_dump_check_consistent()
                         * this means that setting prev_seq here will cause the
                         * consistence check to fail in the netlink callback
index 3235261..38baeb1 100644 (file)
@@ -1401,6 +1401,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
                        sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
                        sock->state = SS_UNCONNECTED;
                        vsock_transport_cancel_pkt(vsk);
+                       vsock_remove_connected(vsk);
                        goto out_wait;
                } else if (timeout == 0) {
                        err = -ETIMEDOUT;
index 1e9be50..527ae66 100644 (file)
@@ -33,7 +33,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
          echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
         ) > $@
 
-$(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDI) \
+$(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR) \
                      $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR)/*.x509)
        @$(kecho) "  GEN     $@"
        $(Q)(set -e; \
index 3a54c8e..f08d4b3 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2010         Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -332,29 +332,20 @@ static void cfg80211_event_work(struct work_struct *work)
 void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
 {
        struct wireless_dev *wdev, *tmp;
-       bool found = false;
 
        ASSERT_RTNL();
 
-       list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+       list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
                if (wdev->nl_owner_dead) {
                        if (wdev->netdev)
                                dev_close(wdev->netdev);
-                       found = true;
-               }
-       }
-
-       if (!found)
-               return;
 
-       wiphy_lock(&rdev->wiphy);
-       list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
-               if (wdev->nl_owner_dead) {
+                       wiphy_lock(&rdev->wiphy);
                        cfg80211_leave(rdev, wdev);
                        rdev_del_virtual_intf(rdev, wdev);
+                       wiphy_unlock(&rdev->wiphy);
                }
        }
-       wiphy_unlock(&rdev->wiphy);
 }
 
 static void cfg80211_destroy_iface_wk(struct work_struct *work)
index 578bff9..c01fbcc 100644 (file)
@@ -13411,6 +13411,9 @@ static int handle_nan_filter(struct nlattr *attr_filter,
        i = 0;
        nla_for_each_nested(attr, attr_filter, rem) {
                filter[i].filter = nla_memdup(attr, GFP_KERNEL);
+               if (!filter[i].filter)
+                       goto err;
+
                filter[i].len = nla_len(attr);
                i++;
        }
@@ -13423,6 +13426,15 @@ static int handle_nan_filter(struct nlattr *attr_filter,
        }
 
        return 0;
+
+err:
+       i = 0;
+       nla_for_each_nested(attr, attr_filter, rem) {
+               kfree(filter[i].filter);
+               i++;
+       }
+       kfree(filter);
+       return -ENOMEM;
 }
 
 static int nl80211_nan_add_func(struct sk_buff *skb,
@@ -17816,7 +17828,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
        wdev->chandef = *chandef;
        wdev->preset_chandef = *chandef;
 
-       if (wdev->iftype == NL80211_IFTYPE_STATION &&
+       if ((wdev->iftype == NL80211_IFTYPE_STATION ||
+            wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
            !WARN_ON(!wdev->current_bss))
                cfg80211_update_assoc_bss_entry(wdev, chandef->chan);
 
index 3fa0664..39bce5d 100644 (file)
@@ -223,6 +223,9 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
        if (x->encap || x->tfcpad)
                return -EINVAL;
 
+       if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND))
+               return -EINVAL;
+
        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
@@ -262,7 +265,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
        netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
        xso->real_dev = dev;
        xso->num_exthdrs = 1;
-       xso->flags = xuo->flags;
+       /* Don't forward bit that is not implemented */
+       xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6;
 
        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
index 57448fc..4e3c62d 100644 (file)
@@ -673,12 +673,12 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
        struct net *net = xi->net;
        struct xfrm_if_parms p = {};
 
+       xfrmi_netlink_parms(data, &p);
        if (!p.if_id) {
                NL_SET_ERR_MSG(extack, "if_id must be non zero");
                return -EINVAL;
        }
 
-       xfrmi_netlink_parms(data, &p);
        xi = xfrmi_locate(net, &p);
        if (!xi) {
                xi = netdev_priv(dev);
index 04d1ce9..8825261 100644 (file)
@@ -4256,7 +4256,7 @@ static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
 }
 
 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
-                                                   u8 dir, u8 type, struct net *net)
+                                                   u8 dir, u8 type, struct net *net, u32 if_id)
 {
        struct xfrm_policy *pol, *ret = NULL;
        struct hlist_head *chain;
@@ -4265,7 +4265,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
        spin_lock_bh(&net->xfrm.xfrm_policy_lock);
        chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
        hlist_for_each_entry(pol, chain, bydst) {
-               if (xfrm_migrate_selector_match(sel, &pol->selector) &&
+               if ((if_id == 0 || pol->if_id == if_id) &&
+                   xfrm_migrate_selector_match(sel, &pol->selector) &&
                    pol->type == type) {
                        ret = pol;
                        priority = ret->priority;
@@ -4277,7 +4278,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
                if ((pol->priority >= priority) && ret)
                        break;
 
-               if (xfrm_migrate_selector_match(sel, &pol->selector) &&
+               if ((if_id == 0 || pol->if_id == if_id) &&
+                   xfrm_migrate_selector_match(sel, &pol->selector) &&
                    pol->type == type) {
                        ret = pol;
                        break;
@@ -4393,7 +4395,7 @@ static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
                 struct xfrm_migrate *m, int num_migrate,
                 struct xfrm_kmaddress *k, struct net *net,
-                struct xfrm_encap_tmpl *encap)
+                struct xfrm_encap_tmpl *encap, u32 if_id)
 {
        int i, err, nx_cur = 0, nx_new = 0;
        struct xfrm_policy *pol = NULL;
@@ -4412,14 +4414,14 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
        }
 
        /* Stage 1 - find policy */
-       if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
+       if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) {
                err = -ENOENT;
                goto out;
        }
 
        /* Stage 2 - find and update state(s) */
        for (i = 0, mp = m; i < num_migrate; i++, mp++) {
-               if ((x = xfrm_migrate_state_find(mp, net))) {
+               if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
                        x_cur[nx_cur] = x;
                        nx_cur++;
                        xc = xfrm_state_migrate(x, mp, encap);
index ca6bee1..b749935 100644 (file)
@@ -1579,9 +1579,6 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
        memcpy(&x->mark, &orig->mark, sizeof(x->mark));
        memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));
 
-       if (xfrm_init_state(x) < 0)
-               goto error;
-
        x->props.flags = orig->props.flags;
        x->props.extra_flags = orig->props.extra_flags;
 
@@ -1606,7 +1603,8 @@ out:
        return NULL;
 }
 
-struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
+                                               u32 if_id)
 {
        unsigned int h;
        struct xfrm_state *x = NULL;
@@ -1622,6 +1620,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
                                continue;
                        if (m->reqid && x->props.reqid != m->reqid)
                                continue;
+                       if (if_id != 0 && x->if_id != if_id)
+                               continue;
                        if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
                                             m->old_family) ||
                            !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
@@ -1637,6 +1637,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
+                       if (if_id != 0 && x->if_id != if_id)
+                               continue;
                        if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
                                             m->old_family) ||
                            !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
@@ -1663,6 +1665,11 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
        if (!xc)
                return NULL;
 
+       xc->props.family = m->new_family;
+
+       if (xfrm_init_state(xc) < 0)
+               goto error;
+
        memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
        memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
 
@@ -2572,7 +2579,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
 }
 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
 
-u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
+u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
 {
        const struct xfrm_type *type = READ_ONCE(x->type);
        struct crypto_aead *aead;
@@ -2603,17 +2610,7 @@ u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
        return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
                 net_adj) & ~(blksize - 1)) + net_adj - 2;
 }
-EXPORT_SYMBOL_GPL(__xfrm_state_mtu);
-
-u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
-{
-       mtu = __xfrm_state_mtu(x, mtu);
-
-       if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU)
-               return IPV6_MIN_MTU;
-
-       return mtu;
-}
+EXPORT_SYMBOL_GPL(xfrm_state_mtu);
 
 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
 {
index 8cd6c81..a4fb596 100644 (file)
@@ -2608,6 +2608,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
        int n = 0;
        struct net *net = sock_net(skb->sk);
        struct xfrm_encap_tmpl  *encap = NULL;
+       u32 if_id = 0;
 
        if (attrs[XFRMA_MIGRATE] == NULL)
                return -EINVAL;
@@ -2632,7 +2633,10 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
                        return -ENOMEM;
        }
 
-       err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap);
+       if (attrs[XFRMA_IF_ID])
+               if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+
+       err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id);
 
        kfree(encap);
 
index cc0648e..4bca4b7 100644 (file)
@@ -25,7 +25,7 @@
 #include <sys/prctl.h>
 #include <unistd.h>
 
-static int install_filter(int nr, int arch, int error)
+static int install_filter(int arch, int nr, int error)
 {
        struct sock_filter filter[] = {
                BPF_STMT(BPF_LD+BPF_W+BPF_ABS,
@@ -42,6 +42,10 @@ static int install_filter(int nr, int arch, int error)
                .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
                .filter = filter,
        };
+       if (error == -1) {
+               struct sock_filter kill = BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL);
+               filter[4] = kill;
+       }
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
                perror("prctl(NO_NEW_PRIVS)");
                return 1;
@@ -57,9 +61,10 @@ int main(int argc, char **argv)
 {
        if (argc < 5) {
                fprintf(stderr, "Usage:\n"
-                       "dropper <syscall_nr> <arch> <errno> <prog> [<args>]\n"
+                       "dropper <arch> <syscall_nr> <errno> <prog> [<args>]\n"
                        "Hint:  AUDIT_ARCH_I386: 0x%X\n"
                        "       AUDIT_ARCH_X86_64: 0x%X\n"
+                       "       errno == -1 means SECCOMP_RET_KILL\n"
                        "\n", AUDIT_ARCH_I386, AUDIT_ARCH_X86_64);
                return 1;
        }
index d538255..8be8928 100644 (file)
@@ -51,6 +51,7 @@ KBUILD_CFLAGS += -Wno-sign-compare
 KBUILD_CFLAGS += -Wno-format-zero-length
 KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
 KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
+KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
 endif
 
 endif
index 59717be..d3c3a61 100644 (file)
@@ -979,10 +979,10 @@ static int conf_write_autoconf_cmd(const char *autoconf_name)
 
        fprintf(out, "\n$(deps_config): ;\n");
 
-       if (ferror(out)) /* error check for all fprintf() calls */
-               return -1;
-
+       ret = ferror(out); /* error check for all fprintf() calls */
        fclose(out);
+       if (ret)
+               return -1;
 
        if (rename(tmp, name)) {
                perror("rename");
@@ -994,14 +994,19 @@ static int conf_write_autoconf_cmd(const char *autoconf_name)
 
 static int conf_touch_deps(void)
 {
-       const char *name;
+       const char *name, *tmp;
        struct symbol *sym;
        int res, i;
 
-       strcpy(depfile_path, "include/config/");
-       depfile_prefix_len = strlen(depfile_path);
-
        name = conf_get_autoconfig_name();
+       tmp = strrchr(name, '/');
+       depfile_prefix_len = tmp ? tmp - name + 1 : 0;
+       if (depfile_prefix_len + 1 > sizeof(depfile_path))
+               return -1;
+
+       strncpy(depfile_path, name, depfile_prefix_len);
+       depfile_path[depfile_prefix_len] = 0;
+
        conf_read_simple(name, S_DEF_AUTO);
        sym_calc_value(modules_sym);
 
@@ -1093,10 +1098,10 @@ static int __conf_write_autoconf(const char *filename,
                        print_symbol(file, sym);
 
        /* check possible errors in conf_write_heading() and print_symbol() */
-       if (ferror(file))
-               return -1;
-
+       ret = ferror(file);
        fclose(file);
+       if (ret)
+               return -1;
 
        if (rename(tmp, filename)) {
                perror("rename");
index 0590f86..748da57 100644 (file)
@@ -141,7 +141,7 @@ static char *do_lineno(int argc, char *argv[])
 static char *do_shell(int argc, char *argv[])
 {
        FILE *p;
-       char buf[256];
+       char buf[4096];
        char *cmd;
        size_t nread;
        int i;
index 23240d7..895f4b9 100644 (file)
@@ -109,22 +109,25 @@ int asymmetric_verify(struct key *keyring, const char *sig,
 
        pk = asymmetric_key_public_key(key);
        pks.pkey_algo = pk->pkey_algo;
-       if (!strcmp(pk->pkey_algo, "rsa"))
+       if (!strcmp(pk->pkey_algo, "rsa")) {
                pks.encoding = "pkcs1";
-       else if (!strncmp(pk->pkey_algo, "ecdsa-", 6))
+       } else if (!strncmp(pk->pkey_algo, "ecdsa-", 6)) {
                /* edcsa-nist-p192 etc. */
                pks.encoding = "x962";
-       else if (!strcmp(pk->pkey_algo, "ecrdsa") ||
-                  !strcmp(pk->pkey_algo, "sm2"))
+       else if (!strcmp(pk->pkey_algo, "ecrdsa") ||
+                  !strcmp(pk->pkey_algo, "sm2")) {
                pks.encoding = "raw";
-       else
-               return -ENOPKG;
+       } else {
+               ret = -ENOPKG;
+               goto out;
+       }
 
        pks.digest = (u8 *)data;
        pks.digest_size = datalen;
        pks.s = hdr->sig;
        pks.s_size = siglen;
        ret = verify_signature(key, &pks);
+out:
        key_put(key);
        pr_debug("%s() = %d\n", __func__, ret);
        return ret;
index 3d8e9d5..3ad8f77 100644 (file)
@@ -496,12 +496,12 @@ int __init ima_fs_init(void)
 
        return 0;
 out:
+       securityfs_remove(ima_policy);
        securityfs_remove(violations);
        securityfs_remove(runtime_measurements_count);
        securityfs_remove(ascii_runtime_measurements);
        securityfs_remove(binary_runtime_measurements);
        securityfs_remove(ima_symlink);
        securityfs_remove(ima_dir);
-       securityfs_remove(ima_policy);
        return -1;
 }
index 320ca80..2a1f641 100644 (file)
@@ -1967,6 +1967,14 @@ int ima_policy_show(struct seq_file *m, void *v)
 
        rcu_read_lock();
 
+       /* Do not print rules with inactive LSM labels */
+       for (i = 0; i < MAX_LSM_RULES; i++) {
+               if (entry->lsm[i].args_p && !entry->lsm[i].rule) {
+                       rcu_read_unlock();
+                       return 0;
+               }
+       }
+
        if (entry->action & MEASURE)
                seq_puts(m, pt(Opt_measure));
        if (entry->action & DONT_MEASURE)
index 6945603..db1ad6d 100644 (file)
@@ -29,6 +29,7 @@ static struct ima_template_desc builtin_templates[] = {
 
 static LIST_HEAD(defined_templates);
 static DEFINE_SPINLOCK(template_list);
+static int template_setup_done;
 
 static const struct ima_template_field supported_fields[] = {
        {.field_id = "d", .field_init = ima_eventdigest_init,
@@ -101,10 +102,11 @@ static int __init ima_template_setup(char *str)
        struct ima_template_desc *template_desc;
        int template_len = strlen(str);
 
-       if (ima_template)
+       if (template_setup_done)
                return 1;
 
-       ima_init_template_list();
+       if (!ima_template)
+               ima_init_template_list();
 
        /*
         * Verify that a template with the supplied name exists.
@@ -128,6 +130,7 @@ static int __init ima_template_setup(char *str)
        }
 
        ima_template = template_desc;
+       template_setup_done = 1;
        return 1;
 }
 __setup("ima_template=", ima_template_setup);
@@ -136,7 +139,7 @@ static int __init ima_template_fmt_setup(char *str)
 {
        int num_templates = ARRAY_SIZE(builtin_templates);
 
-       if (ima_template)
+       if (template_setup_done)
                return 1;
 
        if (template_desc_init_fields(str, NULL, NULL) < 0) {
@@ -147,6 +150,7 @@ static int __init ima_template_fmt_setup(char *str)
 
        builtin_templates[num_templates - 1].fmt = str;
        ima_template = builtin_templates + num_templates - 1;
+       template_setup_done = 1;
 
        return 1;
 }
index 2922005..0ec5e4c 100644 (file)
@@ -45,6 +45,8 @@ void integrity_audit_message(int audit_msgno, struct inode *inode,
                return;
 
        ab = audit_log_start(audit_context(), GFP_KERNEL, audit_msgno);
+       if (!ab)
+               return;
        audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
                         task_pid_nr(current),
                         from_kuid(&init_user_ns, current_uid()),
index 727c4e4..ff7aea6 100644 (file)
@@ -77,7 +77,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state)
        size_t policy_len;
        int rc = 0;
 
-       WARN_ON(!mutex_is_locked(&state->policy_mutex));
+       lockdep_assert_held(&state->policy_mutex);
 
        state_str = selinux_ima_collect_state(state);
        if (!state_str) {
@@ -117,7 +117,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state)
  */
 void selinux_ima_measure_state(struct selinux_state *state)
 {
-       WARN_ON(mutex_is_locked(&state->policy_mutex));
+       lockdep_assert_not_held(&state->policy_mutex);
 
        mutex_lock(&state->policy_mutex);
        selinux_ima_measure_state_locked(state);
index d1fcd1d..6fd763d 100644 (file)
@@ -511,7 +511,8 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
                                      DEFAULT_GFP, 0);
        if (!sgt)
                return NULL;
-       dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
+       dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
+                                           sg_dma_address(sgt->sgl));
        p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
        if (p)
                dmab->private_data = sgt;
@@ -540,9 +541,9 @@ static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
        if (mode == SNDRV_DMA_SYNC_CPU) {
                if (dmab->dev.dir == DMA_TO_DEVICE)
                        return;
+               invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
                dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
                                         dmab->dev.dir);
-               invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
        } else {
                if (dmab->dev.dir == DMA_FROM_DEVICE)
                        return;
@@ -671,9 +672,13 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
  */
 static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
-       dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
-       return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
-                                    dmab->dev.dir, DEFAULT_GFP);
+       void *p;
+
+       p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
+                                 dmab->dev.dir, DEFAULT_GFP);
+       if (p)
+               dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
+       return p;
 }
 
 static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
index 4b0338c..572ff0d 100644 (file)
@@ -1615,6 +1615,7 @@ static const struct snd_pci_quirk probe_mask_list[] = {
        /* forced codec slots */
        SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103),
        SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103),
+       SND_PCI_QUIRK(0x1558, 0x0351, "Schenker Dock 15", 0x105),
        /* WinFast VP200 H (Teradici) user reported broken communication */
        SND_PCI_QUIRK(0x3a21, 0x040d, "WinFast VP200 H", 0x101),
        {}
@@ -1798,8 +1799,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
 
        assign_position_fix(chip, check_position_fix(chip, position_fix[dev]));
 
-       check_probe_mask(chip, dev);
-
        if (single_cmd < 0) /* allow fallback to single_cmd at errors */
                chip->fallback_to_single_cmd = 1;
        else /* explicitly set to single_cmd or not */
@@ -1825,6 +1824,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
                chip->bus.core.needs_damn_long_delay = 1;
        }
 
+       check_probe_mask(chip, dev);
+
        err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
        if (err < 0) {
                dev_err(card->dev, "Error creating device [card]!\n");
@@ -1940,6 +1941,7 @@ static int azx_first_init(struct azx *chip)
                dma_bits = 32;
        if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(dma_bits)))
                dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));
+       dma_set_max_seg_size(&pci->dev, UINT_MAX);
 
        /* read number of streams from GCAP register instead of using
         * hardcoded value
index 8315bf7..3a42457 100644 (file)
@@ -138,6 +138,22 @@ struct alc_spec {
  * COEF access helper functions
  */
 
+static void coef_mutex_lock(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+
+       snd_hda_power_up_pm(codec);
+       mutex_lock(&spec->coef_mutex);
+}
+
+static void coef_mutex_unlock(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+
+       mutex_unlock(&spec->coef_mutex);
+       snd_hda_power_down_pm(codec);
+}
+
 static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
                                 unsigned int coef_idx)
 {
@@ -151,12 +167,11 @@ static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
 static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
                               unsigned int coef_idx)
 {
-       struct alc_spec *spec = codec->spec;
        unsigned int val;
 
-       mutex_lock(&spec->coef_mutex);
+       coef_mutex_lock(codec);
        val = __alc_read_coefex_idx(codec, nid, coef_idx);
-       mutex_unlock(&spec->coef_mutex);
+       coef_mutex_unlock(codec);
        return val;
 }
 
@@ -173,11 +188,9 @@ static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
 static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
                                 unsigned int coef_idx, unsigned int coef_val)
 {
-       struct alc_spec *spec = codec->spec;
-
-       mutex_lock(&spec->coef_mutex);
+       coef_mutex_lock(codec);
        __alc_write_coefex_idx(codec, nid, coef_idx, coef_val);
-       mutex_unlock(&spec->coef_mutex);
+       coef_mutex_unlock(codec);
 }
 
 #define alc_write_coef_idx(codec, coef_idx, coef_val) \
@@ -198,11 +211,9 @@ static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
                                  unsigned int coef_idx, unsigned int mask,
                                  unsigned int bits_set)
 {
-       struct alc_spec *spec = codec->spec;
-
-       mutex_lock(&spec->coef_mutex);
+       coef_mutex_lock(codec);
        __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set);
-       mutex_unlock(&spec->coef_mutex);
+       coef_mutex_unlock(codec);
 }
 
 #define alc_update_coef_idx(codec, coef_idx, mask, bits_set)   \
@@ -235,9 +246,7 @@ struct coef_fw {
 static void alc_process_coef_fw(struct hda_codec *codec,
                                const struct coef_fw *fw)
 {
-       struct alc_spec *spec = codec->spec;
-
-       mutex_lock(&spec->coef_mutex);
+       coef_mutex_lock(codec);
        for (; fw->nid; fw++) {
                if (fw->mask == (unsigned short)-1)
                        __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
@@ -245,7 +254,7 @@ static void alc_process_coef_fw(struct hda_codec *codec,
                        __alc_update_coefex_idx(codec, fw->nid, fw->idx,
                                                fw->mask, fw->val);
        }
-       mutex_unlock(&spec->coef_mutex);
+       coef_mutex_unlock(codec);
 }
 
 /*
@@ -9170,6 +9179,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
        SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x383d, "Legion Y9000X 2019", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
        SND_PCI_QUIRK(0x17aa, 0x3847, "Legion 7 16ACHG6", ALC287_FIXUP_LEGION_16ACHG6),
        SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
index fd62998..c855f50 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/gpio/consumer.h>
 
 #define EN_SPKR_GPIO_GB                0x11F
-#define EN_SPKR_GPIO_NK                0x146
 #define EN_SPKR_GPIO_NONE      -EINVAL
 
 enum be_id {
index 07de461..4cc431e 100644 (file)
@@ -37,7 +37,7 @@ static struct acp_card_drvdata sof_rt5682_max_data = {
        .hs_codec_id = RT5682,
        .amp_codec_id = MAX98360A,
        .dmic_codec_id = DMIC,
-       .gpio_spkr_en = EN_SPKR_GPIO_NK,
+       .gpio_spkr_en = EN_SPKR_GPIO_NONE,
 };
 
 static struct acp_card_drvdata sof_rt5682s_max_data = {
@@ -47,7 +47,7 @@ static struct acp_card_drvdata sof_rt5682s_max_data = {
        .hs_codec_id = RT5682S,
        .amp_codec_id = MAX98360A,
        .dmic_codec_id = DMIC,
-       .gpio_spkr_en = EN_SPKR_GPIO_NK,
+       .gpio_spkr_en = EN_SPKR_GPIO_NONE,
 };
 
 static const struct snd_kcontrol_new acp_controls[] = {
index 4aaee18..4415fb3 100644 (file)
@@ -150,7 +150,6 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
        SOC_SINGLE("E to F Buffer Disable Switch", CS4265_SPDIF_CTL1,
                                6, 1, 0),
        SOC_ENUM("C Data Access", cam_mode_enum),
-       SOC_SINGLE("SPDIF Switch", CS4265_SPDIF_CTL2, 5, 1, 1),
        SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
                                3, 1, 0),
        SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
@@ -186,7 +185,7 @@ static const struct snd_soc_dapm_widget cs4265_dapm_widgets[] = {
 
        SND_SOC_DAPM_SWITCH("Loopback", SND_SOC_NOPM, 0, 0,
                        &loopback_ctl),
-       SND_SOC_DAPM_SWITCH("SPDIF", SND_SOC_NOPM, 0, 0,
+       SND_SOC_DAPM_SWITCH("SPDIF", CS4265_SPDIF_CTL2, 5, 1,
                        &spdif_switch),
        SND_SOC_DAPM_SWITCH("DAC", CS4265_PWRCTL, 1, 1,
                        &dac_switch),
index fb09715..5b12cbf 100644 (file)
@@ -1022,11 +1022,13 @@ static void rt5668_jack_detect_handler(struct work_struct *work)
                container_of(work, struct rt5668_priv, jack_detect_work.work);
        int val, btn_type;
 
-       while (!rt5668->component)
-               usleep_range(10000, 15000);
-
-       while (!rt5668->component->card->instantiated)
-               usleep_range(10000, 15000);
+       if (!rt5668->component || !rt5668->component->card ||
+           !rt5668->component->card->instantiated) {
+               /* card not yet ready, try later */
+               mod_delayed_work(system_power_efficient_wq,
+                                &rt5668->jack_detect_work, msecs_to_jiffies(15));
+               return;
+       }
 
        mutex_lock(&rt5668->calibrate_mutex);
 
index 0a0ec4a..be68d57 100644 (file)
@@ -1092,11 +1092,13 @@ void rt5682_jack_detect_handler(struct work_struct *work)
        struct snd_soc_dapm_context *dapm;
        int val, btn_type;
 
-       while (!rt5682->component)
-               usleep_range(10000, 15000);
-
-       while (!rt5682->component->card->instantiated)
-               usleep_range(10000, 15000);
+       if (!rt5682->component || !rt5682->component->card ||
+           !rt5682->component->card->instantiated) {
+               /* card not yet ready, try later */
+               mod_delayed_work(system_power_efficient_wq,
+                                &rt5682->jack_detect_work, msecs_to_jiffies(15));
+               return;
+       }
 
        dapm = snd_soc_component_get_dapm(rt5682->component);
 
index efa1016..1e662d1 100644 (file)
@@ -824,11 +824,13 @@ static void rt5682s_jack_detect_handler(struct work_struct *work)
                container_of(work, struct rt5682s_priv, jack_detect_work.work);
        int val, btn_type;
 
-       while (!rt5682s->component)
-               usleep_range(10000, 15000);
-
-       while (!rt5682s->component->card->instantiated)
-               usleep_range(10000, 15000);
+       if (!rt5682s->component || !rt5682s->component->card ||
+           !rt5682s->component->card->instantiated) {
+               /* card not yet ready, try later */
+               mod_delayed_work(system_power_efficient_wq,
+                                &rt5682s->jack_detect_work, msecs_to_jiffies(15));
+               return;
+       }
 
        mutex_lock(&rt5682s->jdet_mutex);
        mutex_lock(&rt5682s->calibrate_mutex);
index 6549e7f..c5ea3b1 100644 (file)
@@ -38,10 +38,12 @@ static void tas2770_reset(struct tas2770_priv *tas2770)
                gpiod_set_value_cansleep(tas2770->reset_gpio, 0);
                msleep(20);
                gpiod_set_value_cansleep(tas2770->reset_gpio, 1);
+               usleep_range(1000, 2000);
        }
 
        snd_soc_component_write(tas2770->component, TAS2770_SW_RST,
                TAS2770_RST);
+       usleep_range(1000, 2000);
 }
 
 static int tas2770_set_bias_level(struct snd_soc_component *component,
@@ -110,6 +112,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component)
 
        if (tas2770->sdz_gpio) {
                gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
+               usleep_range(1000, 2000);
        } else {
                ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
                                                    TAS2770_PWR_CTRL_MASK,
@@ -510,8 +513,10 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
 
        tas2770->component = component;
 
-       if (tas2770->sdz_gpio)
+       if (tas2770->sdz_gpio) {
                gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
+               usleep_range(1000, 2000);
+       }
 
        tas2770_reset(tas2770);
 
index f3672e3..0582585 100644 (file)
@@ -1441,7 +1441,8 @@ static int wm_adsp_buffer_parse_coeff(struct cs_dsp_coeff_ctl *cs_ctl)
        int ret, i;
 
        for (i = 0; i < 5; ++i) {
-               ret = cs_dsp_coeff_read_ctrl(cs_ctl, 0, &coeff_v1, sizeof(coeff_v1));
+               ret = cs_dsp_coeff_read_ctrl(cs_ctl, 0, &coeff_v1,
+                                            min(cs_ctl->len, sizeof(coeff_v1)));
                if (ret < 0)
                        return ret;
 
index 148ddf4..aeca582 100644 (file)
@@ -952,6 +952,7 @@ static int skl_first_init(struct hdac_bus *bus)
        /* allow 64bit DMA address if supported by H/W */
        if (dma_set_mask_and_coherent(bus->dev, DMA_BIT_MASK(64)))
                dma_set_mask_and_coherent(bus->dev, DMA_BIT_MASK(32));
+       dma_set_max_seg_size(bus->dev, UINT_MAX);
 
        /* initialize streams */
        snd_hdac_ext_stream_init_all
index a59e9d2..4b1773c 100644 (file)
@@ -524,7 +524,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        return -EINVAL;
                }
 
-               ret = regmap_update_bits(map, reg_irqclr, val_irqclr, val_irqclr);
+               ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr);
                if (ret) {
                        dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret);
                        return ret;
@@ -665,7 +665,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
        return -EINVAL;
        }
        if (interrupts & LPAIF_IRQ_PER(chan)) {
-               rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
+               rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
                if (rv) {
                        dev_err(soc_runtime->dev,
                                "error writing to irqclear reg: %d\n", rv);
@@ -676,7 +676,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
        }
 
        if (interrupts & LPAIF_IRQ_XRUN(chan)) {
-               rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
+               rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
                if (rv) {
                        dev_err(soc_runtime->dev,
                                "error writing to irqclear reg: %d\n", rv);
@@ -688,7 +688,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
        }
 
        if (interrupts & LPAIF_IRQ_ERR(chan)) {
-               rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
+               rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
                if (rv) {
                        dev_err(soc_runtime->dev,
                                "error writing to irqclear reg: %d\n", rv);
index 9833611..a0ca58b 100644 (file)
@@ -308,7 +308,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
        unsigned int sign_bit = mc->sign_bit;
        unsigned int mask = (1 << fls(max)) - 1;
        unsigned int invert = mc->invert;
-       int err;
+       int err, ret;
        bool type_2r = false;
        unsigned int val2 = 0;
        unsigned int val, val_mask;
@@ -319,7 +319,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
        if (ucontrol->value.integer.value[0] < 0)
                return -EINVAL;
        val = ucontrol->value.integer.value[0];
-       if (mc->platform_max && val > mc->platform_max)
+       if (mc->platform_max && ((int)val + min) > mc->platform_max)
                return -EINVAL;
        if (val > max - min)
                return -EINVAL;
@@ -332,7 +332,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
                if (ucontrol->value.integer.value[1] < 0)
                        return -EINVAL;
                val2 = ucontrol->value.integer.value[1];
-               if (mc->platform_max && val2 > mc->platform_max)
+               if (mc->platform_max && ((int)val2 + min) > mc->platform_max)
                        return -EINVAL;
                if (val2 > max - min)
                        return -EINVAL;
@@ -350,12 +350,18 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
        err = snd_soc_component_update_bits(component, reg, val_mask, val);
        if (err < 0)
                return err;
+       ret = err;
 
-       if (type_2r)
+       if (type_2r) {
                err = snd_soc_component_update_bits(component, reg2, val_mask,
-                       val2);
+                                                   val2);
+               /* Don't discard any error code or drop change flag */
+               if (ret == 0 || err < 0) {
+                       ret = err;
+               }
+       }
 
-       return err;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
 
@@ -421,6 +427,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
        int min = mc->min;
        unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
        int err = 0;
+       int ret;
        unsigned int val, val_mask;
 
        if (ucontrol->value.integer.value[0] < 0)
@@ -437,6 +444,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
        err = snd_soc_component_update_bits(component, reg, val_mask, val);
        if (err < 0)
                return err;
+       ret = err;
 
        if (snd_soc_volsw_is_stereo(mc)) {
                unsigned int val2;
@@ -447,6 +455,11 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
 
                err = snd_soc_component_update_bits(component, reg2, val_mask,
                        val2);
+
+               /* Don't discard any error code or drop change flag */
+               if (ret == 0 || err < 0) {
+                       ret = err;
+               }
        }
        return err;
 }
@@ -506,7 +519,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
        unsigned int mask = (1 << fls(max)) - 1;
        unsigned int invert = mc->invert;
        unsigned int val, val_mask;
-       int ret;
+       int err, ret;
 
        if (invert)
                val = (max - ucontrol->value.integer.value[0]) & mask;
@@ -515,9 +528,10 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
        val_mask = mask << shift;
        val = val << shift;
 
-       ret = snd_soc_component_update_bits(component, reg, val_mask, val);
-       if (ret < 0)
-               return ret;
+       err = snd_soc_component_update_bits(component, reg, val_mask, val);
+       if (err < 0)
+               return err;
+       ret = err;
 
        if (snd_soc_volsw_is_stereo(mc)) {
                if (invert)
@@ -527,8 +541,12 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
                val_mask = mask << shift;
                val = val << shift;
 
-               ret = snd_soc_component_update_bits(component, rreg, val_mask,
+               err = snd_soc_component_update_bits(component, rreg, val_mask,
                        val);
+               /* Don't discard any error code or drop change flag */
+               if (ret == 0 || err < 0) {
+                       ret = err;
+               }
        }
 
        return ret;
@@ -877,6 +895,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
        unsigned long mask = (1UL<<mc->nbits)-1;
        long max = mc->max;
        long val = ucontrol->value.integer.value[0];
+       int ret = 0;
        unsigned int i;
 
        if (val < mc->min || val > mc->max)
@@ -891,9 +910,11 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
                                                        regmask, regval);
                if (err < 0)
                        return err;
+               if (err > 0)
+                       ret = err;
        }
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);
 
index c8fb082..1385695 100644 (file)
@@ -956,6 +956,7 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
                dev_dbg(sdev->dev, "DMA mask is 32 bit\n");
                dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));
        }
+       dma_set_max_seg_size(&pci->dev, UINT_MAX);
 
        /* init streams */
        ret = hda_dsp_stream_init(sdev);
index 70319c8..2d444ec 100644 (file)
@@ -47,13 +47,13 @@ struct snd_usb_implicit_fb_match {
 static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
        /* Generic matching */
        IMPLICIT_FB_GENERIC_DEV(0x0499, 0x1509), /* Steinberg UR22 */
-       IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2080), /* M-Audio FastTrack Ultra */
-       IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2081), /* M-Audio FastTrack Ultra */
        IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2030), /* M-Audio Fast Track C400 */
        IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2031), /* M-Audio Fast Track C600 */
 
        /* Fixed EP */
        /* FIXME: check the availability of generic matching */
+       IMPLICIT_FB_FIXED_DEV(0x0763, 0x2080, 0x81, 2), /* M-Audio FastTrack Ultra */
+       IMPLICIT_FB_FIXED_DEV(0x0763, 0x2081, 0x81, 2), /* M-Audio FastTrack Ultra */
        IMPLICIT_FB_FIXED_DEV(0x2466, 0x8010, 0x81, 2), /* Fractal Audio Axe-Fx III */
        IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0001, 0x81, 2), /* Solid State Logic SSL2 */
        IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0002, 0x81, 2), /* Solid State Logic SSL2+ */
index 630766b..a564195 100644 (file)
@@ -3678,17 +3678,14 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
                                err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
                                                        cval->cache_val[idx]);
                                if (err < 0)
-                                       return err;
+                                       break;
                        }
                        idx++;
                }
        } else {
                /* master */
-               if (cval->cached) {
-                       err = snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
-                       if (err < 0)
-                               return err;
-               }
+               if (cval->cached)
+                       snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
        }
 
        return 0;
index 1c94eaf..4a3ff64 100644 (file)
@@ -1261,7 +1261,7 @@ static int had_pcm_mmap(struct snd_pcm_substream *substream,
 {
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start,
-                       substream->dma_buffer.addr >> PAGE_SHIFT,
+                       substream->runtime->dma_addr >> PAGE_SHIFT,
                        vma->vm_end - vma->vm_start, vma->vm_page_prot);
 }
 
index 6db4e29..ab4e537 100644 (file)
 /* FREE!                                ( 7*32+10) */
 #define X86_FEATURE_PTI                        ( 7*32+11) /* Kernel Page Table Isolation enabled */
 #define X86_FEATURE_RETPOLINE          ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_AMD      ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE   ( 7*32+13) /* "" Use LFENCEs for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2             ( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL      ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
index 3faf0f9..a4a39c3 100644 (file)
 #define MSR_AMD64_ICIBSEXTDCTL         0xc001103c
 #define MSR_AMD64_IBSOPDATA4           0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX    8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SVM_AVIC_DOORBELL    0xc001011b
 #define MSR_AMD64_VM_PAGE_FLUSH                0xc001011e
 #define MSR_AMD64_SEV_ES_GHCB          0xc0010130
 #define MSR_AMD64_SEV                  0xc0010131
index 1600b17..1d3a90d 100644 (file)
@@ -11,7 +11,7 @@ from drgn.helpers.linux import list_for_each_entry, list_empty
 from drgn.helpers.linux import for_each_page
 from drgn.helpers.linux.cpumask import for_each_online_cpu
 from drgn.helpers.linux.percpu import per_cpu_ptr
-from drgn import container_of, FaultError, Object
+from drgn import container_of, FaultError, Object, cast
 
 
 DESC = """
@@ -69,15 +69,15 @@ def oo_objects(s):
 
 
 def count_partial(n, fn):
-    nr_pages = 0
-    for page in list_for_each_entry('struct page', n.partial.address_of_(),
-                                    'lru'):
-         nr_pages += fn(page)
-    return nr_pages
+    nr_objs = 0
+    for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
+                                    'slab_list'):
+         nr_objs += fn(slab)
+    return nr_objs
 
 
-def count_free(page):
-    return page.objects - page.inuse
+def count_free(slab):
+    return slab.objects - slab.inuse
 
 
 def slub_get_slabinfo(s, cfg):
@@ -145,14 +145,14 @@ def detect_kernel_config():
     return cfg
 
 
-def for_each_slab_page(prog):
+def for_each_slab(prog):
     PGSlab = 1 << prog.constant('PG_slab')
     PGHead = 1 << prog.constant('PG_head')
 
     for page in for_each_page(prog):
         try:
             if page.flags.value_() & PGSlab:
-                yield page
+                yield cast('struct slab *', page)
         except FaultError:
             pass
 
@@ -190,13 +190,13 @@ def main():
                                        'list'):
             obj_cgroups.add(ptr.value_())
 
-        # look over all slab pages, belonging to non-root memcgs
-        # and look for objects belonging to the given memory cgroup
-        for page in for_each_slab_page(prog):
-            objcg_vec_raw = page.memcg_data.value_()
+        # look over all slab folios and look for objects belonging
+        # to the given memory cgroup
+        for slab in for_each_slab(prog):
+            objcg_vec_raw = slab.memcg_data.value_()
             if objcg_vec_raw == 0:
                 continue
-            cache = page.slab_cache
+            cache = slab.slab_cache
             if not cache:
                 continue
             addr = cache.value_()
index 5191b57..507ee1f 100644 (file)
@@ -1134,6 +1134,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_VM_GPA_BITS 207
 #define KVM_CAP_XSAVE2 208
 #define KVM_CAP_SYS_ATTRIBUTES 209
+#define KVM_CAP_PPC_AIL_MODE_3 210
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 1b65042..82858b6 100644 (file)
@@ -465,6 +465,8 @@ struct perf_event_attr {
        /*
         * User provided data if sigtrap=1, passed back to user via
         * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
+        * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
+        * truncated accordingly on 32 bit architectures.
         */
        __u64   sig_data;
 };
index 581f9ff..1973a18 100644 (file)
@@ -3,11 +3,7 @@
 #define __LIBPERF_INTERNAL_CPUMAP_H
 
 #include <linux/refcount.h>
-
-/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
-struct perf_cpu {
-       int cpu;
-};
+#include <perf/cpumap.h>
 
 /**
  * A sized, reference counted, sorted array of integers representing CPU
index 15b8faa..4a2edbd 100644 (file)
@@ -7,6 +7,11 @@
 #include <stdio.h>
 #include <stdbool.h>
 
+/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
+struct perf_cpu {
+       int cpu;
+};
+
 LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
 LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void);
 LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
index 93696af..6fa0d65 100644 (file)
@@ -2,6 +2,7 @@ LIBPERF_0.0.1 {
        global:
                libperf_init;
                perf_cpu_map__dummy_new;
+               perf_cpu_map__default_new;
                perf_cpu_map__get;
                perf_cpu_map__put;
                perf_cpu_map__new;
index d39378e..87b0510 100644 (file)
@@ -14,6 +14,8 @@ static int libperf_print(enum libperf_print_level level,
 int test_cpumap(int argc, char **argv)
 {
        struct perf_cpu_map *cpus;
+       struct perf_cpu cpu;
+       int idx;
 
        __T_START;
 
@@ -27,6 +29,15 @@ int test_cpumap(int argc, char **argv)
        perf_cpu_map__put(cpus);
        perf_cpu_map__put(cpus);
 
+       cpus = perf_cpu_map__default_new();
+       if (!cpus)
+               return -1;
+
+       perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+               __T("wrong cpu number", cpu.cpu != -1);
+
+       perf_cpu_map__put(cpus);
+
        __T_END;
        return tests_failed == 0 ? 0 : -1;
 }
index b3479df..fa854c8 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE // needed for sched.h to get sched_[gs]etaffinity and CPU_(ZERO,SET)
+#include <inttypes.h>
 #include <sched.h>
 #include <stdio.h>
 #include <stdarg.h>
@@ -526,12 +527,12 @@ static int test_stat_multiplexing(void)
 
        min = counts[0].val;
        for (i = 0; i < EVENT_NUM; i++) {
-               __T_VERBOSE("Event %2d -- Raw count = %lu, run = %lu, enable = %lu\n",
+               __T_VERBOSE("Event %2d -- Raw count = %" PRIu64 ", run = %" PRIu64 ", enable = %" PRIu64 "\n",
                            i, counts[i].val, counts[i].run, counts[i].ena);
 
                perf_counts_values__scale(&counts[i], true, &scaled);
                if (scaled == 1) {
-                       __T_VERBOSE("\t Scaled count = %lu (%.2lf%%, %lu/%lu)\n",
+                       __T_VERBOSE("\t Scaled count = %" PRIu64 " (%.2lf%%, %" PRIu64 "/%" PRIu64 ")\n",
                                    counts[i].val,
                                    (double)counts[i].run / (double)counts[i].ena * 100.0,
                                    counts[i].run, counts[i].ena);
index 794a375..b2aec04 100644 (file)
@@ -50,15 +50,8 @@ static NORETURN inline void die(const char *err, ...)
 static inline void *xrealloc(void *ptr, size_t size)
 {
        void *ret = realloc(ptr, size);
-       if (!ret && !size)
-               ret = realloc(ptr, 1);
-       if (!ret) {
-               ret = realloc(ptr, size);
-               if (!ret && !size)
-                       ret = realloc(ptr, 1);
-               if (!ret)
-                       die("Out of memory, realloc failed");
-       }
+       if (!ret)
+               die("Out of memory, realloc failed");
        return ret;
 }
 
index abae818..fa478dd 100644 (file)
@@ -463,7 +463,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
                return -EINVAL;
 
        if (PRINT_FIELD(WEIGHT) &&
-           evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT))
+           evsel__do_check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT, allow_user_set))
                return -EINVAL;
 
        if (PRINT_FIELD(SYM) &&
index 32844d8..52b137a 100644 (file)
@@ -1536,13 +1536,20 @@ static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
        return fprintf(fp, "         ? ");
 }
 
+static pid_t workload_pid = -1;
 static bool done = false;
 static bool interrupted = false;
 
-static void sig_handler(int sig)
+static void sighandler_interrupt(int sig __maybe_unused)
 {
-       done = true;
-       interrupted = sig == SIGINT;
+       done = interrupted = true;
+}
+
+static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
+                           void *context __maybe_unused)
+{
+       if (info->si_pid == workload_pid)
+               done = true;
 }
 
 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
@@ -3938,7 +3945,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
        bool draining = false;
 
        trace->live = true;
-       signal(SIGCHLD, sig_handler);
 
        if (!trace->raw_augmented_syscalls) {
                if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
@@ -4018,6 +4024,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
                        fprintf(trace->output, "Couldn't run the workload!\n");
                        goto out_delete_evlist;
                }
+               workload_pid = evlist->workload.pid;
        }
 
        err = evlist__open(evlist);
@@ -4887,10 +4894,16 @@ int cmd_trace(int argc, const char **argv)
        const char * const trace_subcommands[] = { "record", NULL };
        int err = -1;
        char bf[BUFSIZ];
+       struct sigaction sigchld_act;
 
        signal(SIGSEGV, sighandler_dump_stack);
        signal(SIGFPE, sighandler_dump_stack);
-       signal(SIGINT, sig_handler);
+       signal(SIGINT, sighandler_interrupt);
+
+       memset(&sigchld_act, 0, sizeof(sigchld_act));
+       sigchld_act.sa_flags = SA_SIGINFO;
+       sigchld_act.sa_sigaction = sighandler_chld;
+       sigaction(SIGCHLD, &sigchld_act, NULL);
 
        trace.evlist = evlist__new();
        trace.sctbl = syscalltbl__new();
index a36f49f..1116fc6 100644 (file)
@@ -45,8 +45,10 @@ Following tests are defined (with perf commands):
   perf record -d kill                           (test-record-data)
   perf record -F 100 kill                       (test-record-freq)
   perf record -g kill                           (test-record-graph-default)
+  perf record -g kill                           (test-record-graph-default-aarch64)
   perf record --call-graph dwarf kill          (test-record-graph-dwarf)
   perf record --call-graph fp kill              (test-record-graph-fp)
+  perf record --call-graph fp kill              (test-record-graph-fp-aarch64)
   perf record --group -e cycles,instructions kill (test-record-group)
   perf record -e '{cycles,instructions}' kill   (test-record-group1)
   perf record -e '{cycles/period=1/,instructions/period=2/}:S' kill (test-record-group2)
index 5d8234d..f0a18b4 100644 (file)
@@ -2,6 +2,8 @@
 command = record
 args    = --no-bpf-event -g kill >/dev/null 2>&1
 ret     = 1
+# arm64 enables registers in the default mode (fp)
+arch    = !aarch64
 
 [event:base-record]
 sample_type=295
diff --git a/tools/perf/tests/attr/test-record-graph-default-aarch64 b/tools/perf/tests/attr/test-record-graph-default-aarch64
new file mode 100644 (file)
index 0000000..e98d62e
--- /dev/null
@@ -0,0 +1,9 @@
+[config]
+command = record
+args    = --no-bpf-event -g kill >/dev/null 2>&1
+ret     = 1
+arch    = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
index 5630521..a6e60e8 100644 (file)
@@ -2,6 +2,8 @@
 command = record
 args    = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
 ret     = 1
+# arm64 enables registers in fp mode
+arch    = !aarch64
 
 [event:base-record]
 sample_type=295
diff --git a/tools/perf/tests/attr/test-record-graph-fp-aarch64 b/tools/perf/tests/attr/test-record-graph-fp-aarch64
new file mode 100644 (file)
index 0000000..cbeea99
--- /dev/null
@@ -0,0 +1,9 @@
+[config]
+command = record
+args    = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
+ret     = 1
+arch    = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
index 1f147fe..e32ece9 100644 (file)
 #include "tests.h"
 #include "../perf-sys.h"
 
-/*
- * PowerPC and S390 do not support creation of instruction breakpoints using the
- * perf_event interface.
- *
- * Just disable the test for these architectures until these issues are
- * resolved.
- */
-#if defined(__powerpc__) || defined(__s390x__)
-#define BP_ACCOUNT_IS_SUPPORTED 0
-#else
-#define BP_ACCOUNT_IS_SUPPORTED 1
-#endif
-
 #define NUM_THREADS 5
 
 static struct {
@@ -135,7 +122,7 @@ static int test__sigtrap(struct test_suite *test __maybe_unused, int subtest __m
        char sbuf[STRERR_BUFSIZE];
        int i, fd, ret = TEST_FAIL;
 
-       if (!BP_ACCOUNT_IS_SUPPORTED) {
+       if (!BP_SIGNAL_IS_SUPPORTED) {
                pr_debug("Test not supported on this architecture");
                return TEST_SKIP;
        }
index 7ecfaac..16ec605 100644 (file)
@@ -1220,9 +1220,10 @@ bpf__obj_config_map(struct bpf_object *obj,
        pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
        err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
 out:
-       free(map_name);
        if (!err)
                *key_scan_pos += strlen(map_opt);
+
+       free(map_name);
        return err;
 }
 
index 4f672f7..8b95fb3 100644 (file)
@@ -50,8 +50,6 @@ struct cs_etm_auxtrace {
        u8 timeless_decoding;
        u8 snapshot_mode;
        u8 data_queued;
-       u8 sample_branches;
-       u8 sample_instructions;
 
        int num_cpu;
        u64 latest_kernel_timestamp;
@@ -410,8 +408,8 @@ static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
 {
        struct cs_etm_packet *tmp;
 
-       if (etm->sample_branches || etm->synth_opts.last_branch ||
-           etm->sample_instructions) {
+       if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
+           etm->synth_opts.instructions) {
                /*
                 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
                 * the next incoming packet.
@@ -1365,7 +1363,6 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
                err = cs_etm__synth_event(session, &attr, id);
                if (err)
                        return err;
-               etm->sample_branches = true;
                etm->branches_sample_type = attr.sample_type;
                etm->branches_id = id;
                id += 1;
@@ -1389,7 +1386,6 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
                err = cs_etm__synth_event(session, &attr, id);
                if (err)
                        return err;
-               etm->sample_instructions = true;
                etm->instructions_sample_type = attr.sample_type;
                etm->instructions_id = id;
                id += 1;
@@ -1420,7 +1416,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
            tidq->prev_packet->last_instr_taken_branch)
                cs_etm__update_last_branch_rb(etmq, tidq);
 
-       if (etm->sample_instructions &&
+       if (etm->synth_opts.instructions &&
            tidq->period_instructions >= etm->instructions_sample_period) {
                /*
                 * Emit instruction sample periodically
@@ -1503,7 +1499,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
                }
        }
 
-       if (etm->sample_branches) {
+       if (etm->synth_opts.branches) {
                bool generate_sample = false;
 
                /* Generate sample for tracing on packet */
@@ -1557,6 +1553,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq,
                goto swap_packet;
 
        if (etmq->etm->synth_opts.last_branch &&
+           etmq->etm->synth_opts.instructions &&
            tidq->prev_packet->sample_type == CS_ETM_RANGE) {
                u64 addr;
 
@@ -1582,7 +1579,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq,
 
        }
 
-       if (etm->sample_branches &&
+       if (etm->synth_opts.branches &&
            tidq->prev_packet->sample_type == CS_ETM_RANGE) {
                err = cs_etm__synth_branch_sample(etmq, tidq);
                if (err)
@@ -1614,6 +1611,7 @@ static int cs_etm__end_block(struct cs_etm_queue *etmq,
         * the trace.
         */
        if (etmq->etm->synth_opts.last_branch &&
+           etmq->etm->synth_opts.instructions &&
            tidq->prev_packet->sample_type == CS_ETM_RANGE) {
                u64 addr;
 
index f5d260b..15a4547 100644 (file)
@@ -44,10 +44,6 @@ int perf_data__create_dir(struct perf_data *data, int nr)
        if (!files)
                return -ENOMEM;
 
-       data->dir.version = PERF_DIR_VERSION;
-       data->dir.files   = files;
-       data->dir.nr      = nr;
-
        for (i = 0; i < nr; i++) {
                struct perf_data_file *file = &files[i];
 
@@ -62,6 +58,9 @@ int perf_data__create_dir(struct perf_data *data, int nr)
                file->fd = ret;
        }
 
+       data->dir.version = PERF_DIR_VERSION;
+       data->dir.files   = files;
+       data->dir.nr      = nr;
        return 0;
 
 out_err:
index 7f23421..57f02be 100644 (file)
@@ -154,8 +154,8 @@ int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
                perf_cpu_map__put(matched_cpus);
                perf_cpu_map__put(unmatched_cpus);
        }
-
-       ret = (unmatched_count == events_nr) ? -1 : 0;
+       if (events_nr)
+               ret = (unmatched_count == events_nr) ? -1 : 0;
 out:
        perf_cpu_map__put(cpus);
        return ret;
index 4f32133..13d854a 100755 (executable)
@@ -61,7 +61,7 @@ def main(argv: Sequence[str]) -> None:
                elif isinstance(ex, subprocess.CalledProcessError):
                        print(f'{name}: FAILED')
                else:
-                       print('{name}: unexpected exception: {ex}')
+                       print(f'{name}: unexpected exception: {ex}')
                        continue
 
                output = ex.output
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_crash.c b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
new file mode 100644 (file)
index 0000000..f74b823
--- /dev/null
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "timer_crash.skel.h"
+
+enum {
+       MODE_ARRAY,
+       MODE_HASH,
+};
+
+static void test_timer_crash_mode(int mode)
+{
+       struct timer_crash *skel;
+
+       skel = timer_crash__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load"))
+               return;
+       skel->bss->pid = getpid();
+       skel->bss->crash_map = mode;
+       if (!ASSERT_OK(timer_crash__attach(skel), "timer_crash__attach"))
+               goto end;
+       usleep(1);
+end:
+       timer_crash__destroy(skel);
+}
+
+void test_timer_crash(void)
+{
+       if (test__start_subtest("array"))
+               test_timer_crash_mode(MODE_ARRAY);
+       if (test__start_subtest("hash"))
+               test_timer_crash_mode(MODE_HASH);
+}
index 2966564..6c85b00 100644 (file)
@@ -235,7 +235,7 @@ SEC("sk_msg1")
 int bpf_prog4(struct sk_msg_md *msg)
 {
        int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
-       int *start, *end, *start_push, *end_push, *start_pop, *pop;
+       int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0;
 
        bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
        if (bytes)
@@ -249,8 +249,11 @@ int bpf_prog4(struct sk_msg_md *msg)
                bpf_msg_pull_data(msg, *start, *end, 0);
        start_push = bpf_map_lookup_elem(&sock_bytes, &two);
        end_push = bpf_map_lookup_elem(&sock_bytes, &three);
-       if (start_push && end_push)
-               bpf_msg_push_data(msg, *start_push, *end_push, 0);
+       if (start_push && end_push) {
+               err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+               if (err)
+                       return SK_DROP;
+       }
        start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
        pop = bpf_map_lookup_elem(&sock_bytes, &five);
        if (start_pop && pop)
@@ -263,6 +266,7 @@ int bpf_prog6(struct sk_msg_md *msg)
 {
        int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
        int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
+       int err = 0;
        __u64 flags = 0;
 
        bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
@@ -279,8 +283,11 @@ int bpf_prog6(struct sk_msg_md *msg)
 
        start_push = bpf_map_lookup_elem(&sock_bytes, &two);
        end_push = bpf_map_lookup_elem(&sock_bytes, &three);
-       if (start_push && end_push)
-               bpf_msg_push_data(msg, *start_push, *end_push, 0);
+       if (start_push && end_push) {
+               err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+               if (err)
+                       return SK_DROP;
+       }
 
        start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
        pop = bpf_map_lookup_elem(&sock_bytes, &five);
@@ -338,7 +345,7 @@ SEC("sk_msg5")
 int bpf_prog10(struct sk_msg_md *msg)
 {
        int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
-       int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
+       int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0;
 
        bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
        if (bytes)
@@ -352,8 +359,11 @@ int bpf_prog10(struct sk_msg_md *msg)
                bpf_msg_pull_data(msg, *start, *end, 0);
        start_push = bpf_map_lookup_elem(&sock_bytes, &two);
        end_push = bpf_map_lookup_elem(&sock_bytes, &three);
-       if (start_push && end_push)
-               bpf_msg_push_data(msg, *start_push, *end_push, 0);
+       if (start_push && end_push) {
+               err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+               if (err)
+                       return SK_PASS;
+       }
        start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
        pop = bpf_map_lookup_elem(&sock_bytes, &five);
        if (start_pop && pop)
diff --git a/tools/testing/selftests/bpf/progs/timer_crash.c b/tools/testing/selftests/bpf/progs/timer_crash.c
new file mode 100644 (file)
index 0000000..f8f7944
--- /dev/null
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_elem {
+       struct bpf_timer timer;
+       struct bpf_spin_lock lock;
+};
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 1);
+       __type(key, int);
+       __type(value, struct map_elem);
+} amap SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __uint(max_entries, 1);
+       __type(key, int);
+       __type(value, struct map_elem);
+} hmap SEC(".maps");
+
+int pid = 0;
+int crash_map = 0; /* 0 for amap, 1 for hmap */
+
+SEC("fentry/do_nanosleep")
+int sys_enter(void *ctx)
+{
+       struct map_elem *e, value = {};
+       void *map = crash_map ? (void *)&hmap : (void *)&amap;
+
+       if (bpf_get_current_task_btf()->tgid != pid)
+               return 0;
+
+       *(void **)&value = (void *)0xdeadcaf3;
+
+       bpf_map_update_elem(map, &(int){0}, &value, 0);
+       /* For array map, doing bpf_map_update_elem will do a
+        * check_and_free_timer_in_array, which will trigger the crash if timer
+        * pointer was overwritten, for hmap we need to use bpf_timer_cancel.
+        */
+       if (crash_map == 1) {
+               e = bpf_map_lookup_elem(map, &(int){0});
+               if (!e)
+                       return 0;
+               bpf_timer_cancel(&e->timer);
+       }
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
index 076cf43..cd45821 100644 (file)
@@ -126,8 +126,6 @@ static void test_clone3(uint64_t flags, size_t size, int expected,
 
 int main(int argc, char *argv[])
 {
-       pid_t pid;
-
        uid_t uid = getuid();
 
        ksft_print_header();
index bcb110e..dea33dc 100755 (executable)
@@ -50,8 +50,8 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
                        else
                                log_test "'$current_test' [$profile] overflow $target"
                        fi
+                       RET_FIN=$(( RET_FIN || RET ))
                done
-               RET_FIN=$(( RET_FIN || RET ))
        done
 done
 current_test=""
index 3e3e06e..86e7878 100644 (file)
@@ -60,7 +60,8 @@ __tc_police_test()
 
        tc_police_rules_create $count $should_fail
 
-       offload_count=$(tc filter show dev $swp1 ingress | grep in_hw | wc -l)
+       offload_count=$(tc -j filter show dev $swp1 ingress |
+                       jq "[.[] | select(.options.in_hw == true)] | length")
        ((offload_count == count))
        check_err_fail $should_fail $? "tc police offload count"
 }
index 12c5e27..2d7fca4 100644 (file)
@@ -3,8 +3,8 @@ CFLAGS = -Wall
 CFLAGS += -Wno-nonnull
 CFLAGS += -D_GNU_SOURCE
 
-TEST_PROGS := binfmt_script non-regular
-TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216
+TEST_PROGS := binfmt_script
+TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular
 TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
 # Makefile is a run-time dependency, since it's accessed by the execveat test
 TEST_FILES := Makefile
index e96e279..25432b8 100644 (file)
@@ -19,7 +19,7 @@ fail() { # mesg
 
 FILTER=set_ftrace_filter
 FUNC1="schedule"
-FUNC2="do_softirq"
+FUNC2="scheduler_tick"
 
 ALL_FUNCS="#### all functions enabled ####"
 
index 06256c9..f4a15cb 100644 (file)
 #define SYSFS_PATH_MAX 256
 #define DNAME_PATH_MAX 256
 
+/*
+ * Support ancient lirc.h which does not have these values. Can be removed
+ * once RHEL 8 is no longer a relevant testing platform.
+ */
+#if RC_PROTO_MAX < 26
+#define RC_PROTO_RCMM12 24
+#define RC_PROTO_RCMM24 25
+#define RC_PROTO_RCMM32 26
+#endif
+
 static const struct {
        enum rc_proto proto;
        const char *name;
index 0e4926b..17c3f07 100644 (file)
@@ -82,7 +82,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
-TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
 TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
 TEST_GEN_PROGS_x86_64 += x86_64/amx_test
 TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
index 9ad38bd..b08d30b 100644 (file)
@@ -366,6 +366,7 @@ static struct kvm_vm *test_vm_create(void)
 {
        struct kvm_vm *vm;
        unsigned int i;
+       int ret;
        int nr_vcpus = test_args.nr_vcpus;
 
        vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
@@ -382,7 +383,11 @@ static struct kvm_vm *test_vm_create(void)
 
        ucall_init(vm, NULL);
        test_init_timer_irq(vm);
-       vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+       ret = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+       if (ret < 0) {
+               print_skip("Failed to create vgic-v3");
+               exit(KSFT_SKIP);
+       }
 
        /* Make all the test's cmdline args visible to the guest */
        sync_global_to_guest(vm, test_args);
index e6c7d7f..7eca977 100644 (file)
@@ -761,6 +761,10 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
 
        gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
                        GICD_BASE_GPA, GICR_BASE_GPA);
+       if (gic_fd < 0) {
+               print_skip("Failed to create vgic-v3, skipping");
+               exit(KSFT_SKIP);
+       }
 
        vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
                guest_irq_handlers[args.eoi_split][args.level_sensitive]);
index b3a0fca..f5cd0c5 100644 (file)
@@ -52,7 +52,9 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
                        nr_vcpus, nr_vcpus_created);
 
        /* Distributor setup */
-       gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+       if (_kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3,
+                              false, &gic_fd) != 0)
+               return -1;
 
        kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
                        0, &nr_irqs, true);
index 192a289..94df269 100644 (file)
@@ -455,6 +455,7 @@ static void mfd_fail_write(int fd)
                        printf("mmap()+mprotect() didn't fail as expected\n");
                        abort();
                }
+               munmap(p, mfd_def_size);
        }
 
        /* verify PUNCH_HOLE fails */
index f31205f..8c5fea6 100644 (file)
@@ -1236,7 +1236,7 @@ static int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long
 }
 
 /**
- * Validate that an attached mount in our mount namespace can be idmapped.
+ * Validate that an attached mount in our mount namespace cannot be idmapped.
  * (The kernel enforces that the mount's mount namespace and the caller's mount
  *  namespace match.)
  */
@@ -1259,7 +1259,7 @@ TEST_F(mount_setattr_idmapped, attached_mount_inside_current_mount_namespace)
 
        attr.userns_fd  = get_userns_fd(0, 10000, 10000);
        ASSERT_GE(attr.userns_fd, 0);
-       ASSERT_EQ(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+       ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
        ASSERT_EQ(close(attr.userns_fd), 0);
        ASSERT_EQ(close(open_tree_fd), 0);
 }
index 2674ba2..ff82102 100755 (executable)
@@ -71,6 +71,36 @@ chk_msk_remote_key_nr()
                __chk_nr "grep -c remote_key" $*
 }
 
+# $1: ns, $2: port
+wait_local_port_listen()
+{
+       local listener_ns="${1}"
+       local port="${2}"
+
+       local port_hex i
+
+       port_hex="$(printf "%04X" "${port}")"
+       for i in $(seq 10); do
+               ip netns exec "${listener_ns}" cat /proc/net/tcp | \
+                       awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
+                       break
+               sleep 0.1
+       done
+}
+
+wait_connected()
+{
+       local listener_ns="${1}"
+       local port="${2}"
+
+       local port_hex i
+
+       port_hex="$(printf "%04X" "${port}")"
+       for i in $(seq 10); do
+               ip netns exec ${listener_ns} grep -q " 0100007F:${port_hex} " /proc/net/tcp && break
+               sleep 0.1
+       done
+}
 
 trap cleanup EXIT
 ip netns add $ns
@@ -81,15 +111,15 @@ echo "a" | \
                ip netns exec $ns \
                        ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
                                0.0.0.0 >/dev/null &
-sleep 0.1
+wait_local_port_listen $ns 10000
 chk_msk_nr 0 "no msk on netns creation"
 
 echo "b" | \
        timeout ${timeout_test} \
                ip netns exec $ns \
-                       ./mptcp_connect -p 10000 -j -t ${timeout_poll} \
+                       ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} \
                                127.0.0.1 >/dev/null &
-sleep 0.1
+wait_connected $ns 10000
 chk_msk_nr 2 "after MPC handshake "
 chk_msk_remote_key_nr 2 "....chk remote_key"
 chk_msk_fallback_nr 0 "....chk no fallback"
@@ -101,13 +131,13 @@ echo "a" | \
                ip netns exec $ns \
                        ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
                                0.0.0.0 >/dev/null &
-sleep 0.1
+wait_local_port_listen $ns 10001
 echo "b" | \
        timeout ${timeout_test} \
                ip netns exec $ns \
-                       ./mptcp_connect -p 10001 -j -t ${timeout_poll} \
+                       ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} \
                                127.0.0.1 >/dev/null &
-sleep 0.1
+wait_connected $ns 10001
 chk_msk_fallback_nr 1 "check fallback"
 flush_pids
 
@@ -119,7 +149,7 @@ for I in `seq 1 $NR_CLIENTS`; do
                                ./mptcp_connect -p $((I+10001)) -l -w 10 \
                                        -t ${timeout_poll} 0.0.0.0 >/dev/null &
 done
-sleep 0.1
+wait_local_port_listen $ns $((NR_CLIENTS + 10001))
 
 for I in `seq 1 $NR_CLIENTS`; do
        echo "b" | \
index cb5809b..f0f4ab9 100755 (executable)
@@ -763,8 +763,8 @@ run_tests_disconnect()
        run_tests_lo "$ns1" "$ns1" dead:beef:1::1 1 "-I 3 -i $old_cin"
 
        # restore previous status
-       cout=$old_cout
-       cout_disconnect="$cout".disconnect
+       sin=$old_sin
+       sin_disconnect="$cout".disconnect
        cin=$old_cin
        cin_disconnect="$cin".disconnect
        connect_per_transfer=1
index b8bdbec..0c8a2a2 100755 (executable)
@@ -660,6 +660,7 @@ chk_join_nr()
        local ack_nr=$4
        local count
        local dump_stats
+       local with_cookie
 
        printf "%02u %-36s %s" "$TEST_COUNT" "$msg" "syn"
        count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'`
@@ -673,12 +674,20 @@ chk_join_nr()
        fi
 
        echo -n " - synack"
+       with_cookie=`ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies`
        count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}'`
        [ -z "$count" ] && count=0
        if [ "$count" != "$syn_ack_nr" ]; then
-               echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
-               ret=1
-               dump_stats=1
+               # simultaneous connections exceeding the limit with cookie enabled could go up to
+               # synack validation as the conn limit can be enforced reliably only after
+               # the subflow creation
+               if [ "$with_cookie" = 2 ] && [ "$count" -gt "$syn_ack_nr" ] && [ "$count" -le "$syn_nr" ]; then
+                       echo -n "[ ok ]"
+               else
+                       echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
+                       ret=1
+                       dump_stats=1
+               fi
        else
                echo -n "[ ok ]"
        fi
@@ -752,11 +761,17 @@ chk_add_nr()
        local mis_ack_nr=${8:-0}
        local count
        local dump_stats
+       local timeout
+
+       timeout=`ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout`
 
        printf "%-39s %s" " " "add"
-       count=`ip netns exec $ns2 nstat -as | grep MPTcpExtAddAddr | awk '{print $2}'`
+       count=`ip netns exec $ns2 nstat -as MPTcpExtAddAddr | grep MPTcpExtAddAddr | awk '{print $2}'`
        [ -z "$count" ] && count=0
-       if [ "$count" != "$add_nr" ]; then
+
+       # if the test configured a short timeout tolerate greater than expected
+       # add addrs options, due to retransmissions
+       if [ "$count" != "$add_nr" ] && [ "$timeout" -gt 1 -o "$count" -lt "$add_nr" ]; then
                echo "[fail] got $count ADD_ADDR[s] expected $add_nr"
                ret=1
                dump_stats=1
@@ -961,7 +976,7 @@ wait_for_tw()
        local ns=$1
 
        while [ $time -lt $timeout_ms ]; do
-               local cnt=$(ip netns exec $ns ss -t state time-wait |wc -l)
+               local cnt=$(ip netns exec $ns nstat -as TcpAttemptFails | grep TcpAttemptFails | awk '{print $2}')
 
                [ "$cnt" = 1 ] && return 1
                time=$((time + 100))
@@ -1158,7 +1173,11 @@ signal_address_tests()
        ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags signal
        ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags signal
        ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags signal
-       run_tests $ns1 $ns2 10.0.1.1
+
+       # the peer could possibly miss some addr notification, allow retransmission
+       ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
+       run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+       chk_join_nr "signal addresses race test" 3 3 3
 
        # the server will not signal the address terminating
        # the MPC subflow
index ffca314..7e81c9a 100644 (file)
@@ -6,9 +6,9 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
        nft_concat_range.sh nft_conntrack_helper.sh \
        nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
        ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
-       conntrack_vrf.sh
+       conntrack_vrf.sh nft_synproxy.sh
 
 LDLIBS = -lmnl
-TEST_GEN_FILES =  nf-queue
+TEST_GEN_FILES =  nf-queue connect_close
 
 include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/connect_close.c b/tools/testing/selftests/netfilter/connect_close.c
new file mode 100644 (file)
index 0000000..1c3b0ad
--- /dev/null
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <arpa/inet.h>
+#include <sys/socket.h>
+
+#define PORT 12345
+#define RUNTIME 10
+
+static struct {
+       unsigned int timeout;
+       unsigned int port;
+} opts = {
+       .timeout = RUNTIME,
+       .port = PORT,
+};
+
+static void handler(int sig)
+{
+       _exit(sig == SIGALRM ? 0 : 1);
+}
+
+static void set_timeout(void)
+{
+       struct sigaction action = {
+               .sa_handler = handler,
+       };
+
+       sigaction(SIGALRM, &action, NULL);
+
+       alarm(opts.timeout);
+}
+
+static void do_connect(const struct sockaddr_in *dst)
+{
+       int s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+
+       if (s >= 0)
+               fcntl(s, F_SETFL, O_NONBLOCK);
+
+       connect(s, (struct sockaddr *)dst, sizeof(*dst));
+       close(s);
+}
+
+static void do_accept(const struct sockaddr_in *src)
+{
+       int c, one = 1, s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+
+       if (s < 0)
+               return;
+
+       setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
+       setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
+
+       bind(s, (struct sockaddr *)src, sizeof(*src));
+
+       listen(s, 16);
+
+       c = accept(s, NULL, NULL);
+       if (c >= 0)
+               close(c);
+
+       close(s);
+}
+
+static int accept_loop(void)
+{
+       struct sockaddr_in src = {
+               .sin_family = AF_INET,
+               .sin_port = htons(opts.port),
+       };
+
+       inet_pton(AF_INET, "127.0.0.1", &src.sin_addr);
+
+       set_timeout();
+
+       for (;;)
+               do_accept(&src);
+
+       return 1;
+}
+
+static int connect_loop(void)
+{
+       struct sockaddr_in dst = {
+               .sin_family = AF_INET,
+               .sin_port = htons(opts.port),
+       };
+
+       inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
+
+       set_timeout();
+
+       for (;;)
+               do_connect(&dst);
+
+       return 1;
+}
+
+static void parse_opts(int argc, char **argv)
+{
+       int c;
+
+       while ((c = getopt(argc, argv, "t:p:")) != -1) {
+               switch (c) {
+               case 't':
+                       opts.timeout = atoi(optarg);
+                       break;
+               case 'p':
+                       opts.port = atoi(optarg);
+                       break;
+               }
+       }
+}
+
+int main(int argc, char *argv[])
+{
+       pid_t p;
+
+       parse_opts(argc, argv);
+
+       p = fork();
+       if (p < 0)
+               return 111;
+
+       if (p > 0)
+               return accept_loop();
+
+       return connect_loop();
+}
index df322e4..b35010c 100755 (executable)
@@ -1601,4 +1601,4 @@ for name in ${TESTS}; do
        done
 done
 
-[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP}
+[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP} || exit 0
index 6caf6ac..695a195 100755 (executable)
@@ -174,6 +174,7 @@ test_ping() {
 ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
 ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
 ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.rp_filter=0 > /dev/null
 
 sleep 3
 
index 7d27f1f..e127297 100755 (executable)
@@ -113,6 +113,7 @@ table inet $name {
        chain output {
                type filter hook output priority $prio; policy accept;
                tcp dport 12345 queue num 3
+               tcp sport 23456 queue num 3
                jump nfq
        }
        chain post {
@@ -296,6 +297,23 @@ test_tcp_localhost()
        wait 2>/dev/null
 }
 
+test_tcp_localhost_connectclose()
+{
+       tmpfile=$(mktemp) || exit 1
+
+       ip netns exec ${nsrouter} ./connect_close -p 23456 -t $timeout &
+
+       ip netns exec ${nsrouter} ./nf-queue -q 3 -t $timeout &
+       local nfqpid=$!
+
+       sleep 1
+       rm -f "$tmpfile"
+
+       wait $rpid
+       [ $? -eq 0 ] && echo "PASS: tcp via loopback with connect/close"
+       wait 2>/dev/null
+}
+
 test_tcp_localhost_requeue()
 {
 ip netns exec ${nsrouter} nft -f /dev/stdin <<EOF
@@ -424,6 +442,7 @@ test_queue 20
 
 test_tcp_forward
 test_tcp_localhost
+test_tcp_localhost_connectclose
 test_tcp_localhost_requeue
 test_icmp_vrf
 
diff --git a/tools/testing/selftests/netfilter/nft_synproxy.sh b/tools/testing/selftests/netfilter/nft_synproxy.sh
new file mode 100755 (executable)
index 0000000..b62933b
--- /dev/null
@@ -0,0 +1,117 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+rnd=$(mktemp -u XXXXXXXX)
+nsr="nsr-$rnd" # synproxy machine
+ns1="ns1-$rnd"  # iperf client
+ns2="ns2-$rnd"  # iperf server
+
+checktool (){
+       if ! $1 > /dev/null 2>&1; then
+               echo "SKIP: Could not $2"
+               exit $ksft_skip
+       fi
+}
+
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "iperf3 --version" "run test without iperf3"
+checktool "ip netns add $nsr" "create net namespace"
+
+modprobe -q nf_conntrack
+
+ip netns add $ns1
+ip netns add $ns2
+
+cleanup() {
+       ip netns pids $ns1 | xargs kill 2>/dev/null
+       ip netns pids $ns2 | xargs kill 2>/dev/null
+       ip netns del $ns1
+       ip netns del $ns2
+
+       ip netns del $nsr
+}
+
+trap cleanup EXIT
+
+ip link add veth0 netns $nsr type veth peer name eth0 netns $ns1
+ip link add veth1 netns $nsr type veth peer name eth0 netns $ns2
+
+for dev in lo veth0 veth1; do
+ip -net $nsr link set $dev up
+done
+
+ip -net $nsr addr add 10.0.1.1/24 dev veth0
+ip -net $nsr addr add 10.0.2.1/24 dev veth1
+
+ip netns exec $nsr sysctl -q net.ipv4.conf.veth0.forwarding=1
+ip netns exec $nsr sysctl -q net.ipv4.conf.veth1.forwarding=1
+ip netns exec $nsr sysctl -q net.netfilter.nf_conntrack_tcp_loose=0
+
+for n in $ns1 $ns2; do
+  ip -net $n link set lo up
+  ip -net $n link set eth0 up
+done
+ip -net $ns1 addr add 10.0.1.99/24 dev eth0
+ip -net $ns2 addr add 10.0.2.99/24 dev eth0
+ip -net $ns1 route add default via 10.0.1.1
+ip -net $ns2 route add default via 10.0.2.1
+
+# test basic connectivity
+if ! ip netns exec $ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
+  echo "ERROR: $ns1 cannot reach $ns2" 1>&2
+  exit 1
+fi
+
+if ! ip netns exec $ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then
+  echo "ERROR: $ns2 cannot reach $ns1" 1>&2
+  exit 1
+fi
+
+ip netns exec $ns2 iperf3 -s > /dev/null 2>&1 &
+# ip netns exec $nsr tcpdump -vvv -n -i veth1 tcp | head -n 10 &
+
+sleep 1
+
+ip netns exec $nsr nft -f - <<EOF
+table inet filter {
+   chain prerouting {
+      type filter hook prerouting priority -300; policy accept;
+      meta iif veth0 tcp flags syn counter notrack
+   }
+
+  chain forward {
+      type filter hook forward priority 0; policy accept;
+
+      ct state new,established counter accept
+
+      meta iif veth0 meta l4proto tcp ct state untracked,invalid synproxy mss 1460 sack-perm timestamp
+
+      ct state invalid counter drop
+
+      # make ns2 unreachable without tcp synproxy
+      tcp flags syn counter drop
+   }
+}
+EOF
+if [ $? -ne 0 ]; then
+       echo "SKIP: Cannot add nft synproxy"
+       exit $ksft_skip
+fi
+
+ip netns exec $ns1 timeout 5 iperf3 -c 10.0.2.99 -n $((1 * 1024 * 1024)) > /dev/null
+
+if [ $? -ne 0 ]; then
+       echo "FAIL: iperf3 returned an error" 1>&2
+       ret=$?
+       ip netns exec $nsr nft list ruleset
+else
+       echo "PASS: synproxy connection successful"
+fi
+
+exit $ret
index 01f8d3c..6922d64 100644 (file)
@@ -68,7 +68,7 @@
 #define PIDFD_SKIP 3
 #define PIDFD_XFAIL 4
 
-int wait_for_pid(pid_t pid)
+static inline int wait_for_pid(pid_t pid)
 {
        int status, ret;
 
@@ -78,13 +78,20 @@ again:
                if (errno == EINTR)
                        goto again;
 
+               ksft_print_msg("waitpid returned -1, errno=%d\n", errno);
                return -1;
        }
 
-       if (!WIFEXITED(status))
+       if (!WIFEXITED(status)) {
+               ksft_print_msg(
+                      "waitpid !WIFEXITED, WIFSIGNALED=%d, WTERMSIG=%d\n",
+                      WIFSIGNALED(status), WTERMSIG(status));
                return -1;
+       }
 
-       return WEXITSTATUS(status);
+       ret = WEXITSTATUS(status);
+       ksft_print_msg("waitpid WEXITSTATUS=%d\n", ret);
+       return ret;
 }
 
 static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
index 2255852..3fd8e90 100644 (file)
@@ -12,6 +12,7 @@
 #include <string.h>
 #include <syscall.h>
 #include <sys/wait.h>
+#include <sys/mman.h>
 
 #include "pidfd.h"
 #include "../kselftest.h"
@@ -80,7 +81,10 @@ static inline int error_check(struct error *err, const char *test_name)
        return err->code;
 }
 
+#define CHILD_STACK_SIZE 8192
+
 struct child {
+       char *stack;
        pid_t pid;
        int   fd;
 };
@@ -89,17 +93,22 @@ static struct child clone_newns(int (*fn)(void *), void *args,
                                struct error *err)
 {
        static int flags = CLONE_PIDFD | CLONE_NEWPID | CLONE_NEWNS | SIGCHLD;
-       size_t stack_size = 1024;
-       char *stack[1024] = { 0 };
        struct child ret;
 
        if (!(flags & CLONE_NEWUSER) && geteuid() != 0)
                flags |= CLONE_NEWUSER;
 
+       ret.stack = mmap(NULL, CHILD_STACK_SIZE, PROT_READ | PROT_WRITE,
+                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+       if (ret.stack == MAP_FAILED) {
+               error_set(err, -1, "mmap of stack failed (errno %d)", errno);
+               return ret;
+       }
+
 #ifdef __ia64__
-       ret.pid = __clone2(fn, stack, stack_size, flags, args, &ret.fd);
+       ret.pid = __clone2(fn, ret.stack, CHILD_STACK_SIZE, flags, args, &ret.fd);
 #else
-       ret.pid = clone(fn, stack + stack_size, flags, args, &ret.fd);
+       ret.pid = clone(fn, ret.stack + CHILD_STACK_SIZE, flags, args, &ret.fd);
 #endif
 
        if (ret.pid < 0) {
@@ -129,6 +138,11 @@ static inline int child_join(struct child *child, struct error *err)
        else if (r > 0)
                error_set(err, r, "child %d reported: %d", child->pid, r);
 
+       if (munmap(child->stack, CHILD_STACK_SIZE)) {
+               error_set(err, -1, "munmap of child stack failed (errno %d)", errno);
+               r = -1;
+       }
+
        return r;
 }
 
index 529eb70..9a2d649 100644 (file)
@@ -441,7 +441,6 @@ static void test_pidfd_poll_exec(int use_waitpid)
 {
        int pid, pidfd = 0;
        int status, ret;
-       pthread_t t1;
        time_t prog_start = time(NULL);
        const char *test_name = "pidfd_poll check for premature notification on child thread exec";
 
@@ -500,13 +499,14 @@ static int child_poll_leader_exit_test(void *args)
         */
        *child_exit_secs = time(NULL);
        syscall(SYS_exit, 0);
+       /* Never reached, but appeases the compiler, which thinks we should return. */
+       exit(0);
 }
 
 static void test_pidfd_poll_leader_exit(int use_waitpid)
 {
        int pid, pidfd = 0;
-       int status, ret;
-       time_t prog_start = time(NULL);
+       int status, ret = 0;
        const char *test_name = "pidfd_poll check for premature notification on non-empty"
                                "group leader exit";
 
index be2943f..17999e0 100644 (file)
@@ -39,7 +39,7 @@ static int sys_waitid(int which, pid_t pid, siginfo_t *info, int options,
 
 TEST(wait_simple)
 {
-       int pidfd = -1, status = 0;
+       int pidfd = -1;
        pid_t parent_tid = -1;
        struct clone_args args = {
                .parent_tid = ptr_to_u64(&parent_tid),
@@ -47,7 +47,6 @@ TEST(wait_simple)
                .flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
                .exit_signal = SIGCHLD,
        };
-       int ret;
        pid_t pid;
        siginfo_t info = {
                .si_signo = 0,
@@ -88,7 +87,7 @@ TEST(wait_simple)
 
 TEST(wait_states)
 {
-       int pidfd = -1, status = 0;
+       int pidfd = -1;
        pid_t parent_tid = -1;
        struct clone_args args = {
                .parent_tid = ptr_to_u64(&parent_tid),
index 0ebfe8b..585f7a0 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-CFLAGS += -Wl,-no-as-needed -Wall
+CFLAGS += -Wl,-no-as-needed -Wall -isystem ../../../../usr/include/
 LDFLAGS += -lpthread
 
 TEST_GEN_PROGS := seccomp_bpf seccomp_benchmark
index 2a7c336..1d68908 100644 (file)
@@ -3,9 +3,10 @@
  * hugepage-mremap:
  *
  * Example of remapping huge page memory in a user application using the
- * mremap system call.  Code assumes a hugetlbfs filesystem is mounted
- * at './huge'.  The amount of memory used by this test is decided by a command
- * line argument in MBs. If missing, the default amount is 10MB.
+ * mremap system call.  The path to a file in a hugetlbfs filesystem must
+ * be passed as the last argument to this test.  The amount of memory used
+ * by this test in MBs can optionally be passed as an argument.  If no memory
+ * amount is passed, the default amount is 10MB.
  *
  * To make sure the test triggers pmd sharing and goes through the 'unshare'
  * path in the mremap code use 1GB (1024) or more.
@@ -25,7 +26,6 @@
 #define DEFAULT_LENGTH_MB 10UL
 #define MB_TO_BYTES(x) (x * 1024 * 1024)
 
-#define FILE_NAME "huge/hugepagefile"
 #define PROTECTION (PROT_READ | PROT_WRITE | PROT_EXEC)
 #define FLAGS (MAP_SHARED | MAP_ANONYMOUS)
 
@@ -107,17 +107,26 @@ static void register_region_with_uffd(char *addr, size_t len)
 
 int main(int argc, char *argv[])
 {
+       size_t length;
+
+       if (argc != 2 && argc != 3) {
+               printf("Usage: %s [length_in_MB] <hugetlb_file>\n", argv[0]);
+               exit(1);
+       }
+
        /* Read memory length as the first arg if valid, otherwise fallback to
-        * the default length. Any additional args are ignored.
+        * the default length.
         */
-       size_t length = argc > 1 ? (size_t)atoi(argv[1]) : 0UL;
+       if (argc == 3)
+               length = argc > 2 ? (size_t)atoi(argv[1]) : 0UL;
 
        length = length > 0 ? length : DEFAULT_LENGTH_MB;
        length = MB_TO_BYTES(length);
 
        int ret = 0;
 
-       int fd = open(FILE_NAME, O_CREAT | O_RDWR, 0755);
+       /* last arg is the hugetlb file name */
+       int fd = open(argv[argc-1], O_CREAT | O_RDWR, 0755);
 
        if (fd < 0) {
                perror("Open failed");
@@ -169,5 +178,8 @@ int main(int argc, char *argv[])
 
        munmap(addr, length);
 
+       close(fd);
+       unlink(argv[argc-1]);
+
        return ret;
 }
index d91bde5..eed4432 100644 (file)
@@ -17,9 +17,6 @@
 #define MAP_FIXED_NOREPLACE 0x100000
 #endif
 
-#define BASE_ADDRESS   (256ul * 1024 * 1024)
-
-
 static void dump_maps(void)
 {
        char cmd[32];
@@ -28,18 +25,46 @@ static void dump_maps(void)
        system(cmd);
 }
 
+static unsigned long find_base_addr(unsigned long size)
+{
+       void *addr;
+       unsigned long flags;
+
+       flags = MAP_PRIVATE | MAP_ANONYMOUS;
+       addr = mmap(NULL, size, PROT_NONE, flags, -1, 0);
+       if (addr == MAP_FAILED) {
+               printf("Error: couldn't map the space we need for the test\n");
+               return 0;
+       }
+
+       if (munmap(addr, size) != 0) {
+               printf("Error: couldn't map the space we need for the test\n");
+               return 0;
+       }
+       return (unsigned long)addr;
+}
+
 int main(void)
 {
+       unsigned long base_addr;
        unsigned long flags, addr, size, page_size;
        char *p;
 
        page_size = sysconf(_SC_PAGE_SIZE);
 
+       // let's find a base addr that is free before we start the tests
+       size = 5 * page_size;
+       base_addr = find_base_addr(size);
+       if (!base_addr) {
+               printf("Error: couldn't map the space we need for the test\n");
+               return 1;
+       }
+
        flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE;
 
        // Check we can map all the areas we need below
        errno = 0;
-       addr = BASE_ADDRESS;
+       addr = base_addr;
        size = 5 * page_size;
        p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
 
@@ -60,7 +85,7 @@ int main(void)
        printf("unmap() successful\n");
 
        errno = 0;
-       addr = BASE_ADDRESS + page_size;
+       addr = base_addr + page_size;
        size = 3 * page_size;
        p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
        printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -80,7 +105,7 @@ int main(void)
         *     +4 |  free  | new
         */
        errno = 0;
-       addr = BASE_ADDRESS;
+       addr = base_addr;
        size = 5 * page_size;
        p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
        printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -101,7 +126,7 @@ int main(void)
         *     +4 |  free  |
         */
        errno = 0;
-       addr = BASE_ADDRESS + (2 * page_size);
+       addr = base_addr + (2 * page_size);
        size = page_size;
        p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
        printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -121,7 +146,7 @@ int main(void)
         *     +4 |  free  | new
         */
        errno = 0;
-       addr = BASE_ADDRESS + (3 * page_size);
+       addr = base_addr + (3 * page_size);
        size = 2 * page_size;
        p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
        printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -141,7 +166,7 @@ int main(void)
         *     +4 |  free  |
         */
        errno = 0;
-       addr = BASE_ADDRESS;
+       addr = base_addr;
        size = 2 * page_size;
        p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
        printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -161,7 +186,7 @@ int main(void)
         *     +4 |  free  |
         */
        errno = 0;
-       addr = BASE_ADDRESS;
+       addr = base_addr;
        size = page_size;
        p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
        printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -181,7 +206,7 @@ int main(void)
         *     +4 |  free  |  new
         */
        errno = 0;
-       addr = BASE_ADDRESS + (4 * page_size);
+       addr = base_addr + (4 * page_size);
        size = page_size;
        p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
        printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -192,7 +217,7 @@ int main(void)
                return 1;
        }
 
-       addr = BASE_ADDRESS;
+       addr = base_addr;
        size = 5 * page_size;
        if (munmap((void *)addr, size) != 0) {
                dump_maps();
index 75d4017..71d2dc1 100755 (executable)
@@ -111,13 +111,14 @@ fi
 echo "-----------------------"
 echo "running hugepage-mremap"
 echo "-----------------------"
-./hugepage-mremap 256
+./hugepage-mremap $mnt/huge_mremap
 if [ $? -ne 0 ]; then
        echo "[FAIL]"
        exitcode=1
 else
        echo "[PASS]"
 fi
+rm -f $mnt/huge_mremap
 
 echo "NOTE: The above hugetlb tests provide minimal coverage.  Use"
 echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for"
index 2f49c9a..3fc1d2e 100644 (file)
@@ -46,6 +46,7 @@
 #include <signal.h>
 #include <poll.h>
 #include <string.h>
+#include <linux/mman.h>
 #include <sys/mman.h>
 #include <sys/syscall.h>
 #include <sys/ioctl.h>
index 7c39728..5a1eda6 100644 (file)
@@ -1,5 +1,6 @@
 NAME   :=      rtla
-VERSION        :=      0.5
+# Follow the kernel version
+VERSION :=     $(shell cat VERSION 2> /dev/null || make -sC ../../.. kernelversion)
 
 # From libtracefs:
 # Makefiles suck: This macro sets a default value of $(2) for the
@@ -85,6 +86,7 @@ clean: doc_clean
 
 tarball: clean
        rm -rf $(NAME)-$(VERSION) && mkdir $(NAME)-$(VERSION)
+       echo $(VERSION) > $(NAME)-$(VERSION)/VERSION
        cp -r $(DIRS) $(FILES) $(NAME)-$(VERSION)
        mkdir $(NAME)-$(VERSION)/Documentation/
        cp -rp $(SRCTREE)/../../../Documentation/tools/rtla/* $(NAME)-$(VERSION)/Documentation/
index 7b73d1e..e60f186 100644 (file)
@@ -750,6 +750,9 @@ void osnoise_put_context(struct osnoise_context *context)
  */
 void osnoise_destroy_tool(struct osnoise_tool *top)
 {
+       if (!top)
+               return;
+
        trace_instance_destroy(&top->trace);
 
        if (top->context)
@@ -807,7 +810,7 @@ struct osnoise_tool *osnoise_init_trace_tool(char *tracer)
 
        retval = enable_tracer_by_name(trace->trace.inst, tracer);
        if (retval) {
-               err_msg("Could not enable osnoiser tracer for tracing\n");
+               err_msg("Could not enable %s tracer for tracing\n", tracer);
                goto out_err;
        }
 
index 180fcbe..52c053c 100644 (file)
@@ -426,7 +426,7 @@ static void osnoise_hist_usage(char *usage)
        static const char * const msg[] = {
                "",
                "  usage: rtla osnoise hist [-h] [-D] [-d s] [-p us] [-r us] [-s us] [-S us] [-t[=file]] \\",
-               "         [-c cpu-list] [-P priority] [-b N] [-e N] [--no-header] [--no-summary] \\",
+               "         [-c cpu-list] [-P priority] [-b N] [-E N] [--no-header] [--no-summary] \\",
                "         [--no-index] [--with-zeros]",
                "",
                "         -h/--help: print this menu",
@@ -439,7 +439,7 @@ static void osnoise_hist_usage(char *usage)
                "         -D/--debug: print debug info",
                "         -t/--trace[=file]: save the stopped trace to [file|osnoise_trace.txt]",
                "         -b/--bucket-size N: set the histogram bucket size (default 1)",
-               "         -e/--entries N: set the number of entries of the histogram (default 256)",
+               "         -E/--entries N: set the number of entries of the histogram (default 256)",
                "            --no-header: do not print header",
                "            --no-summary: do not print summary",
                "            --no-index: do not print index",
@@ -486,7 +486,7 @@ static struct osnoise_hist_params
        while (1) {
                static struct option long_options[] = {
                        {"bucket-size",         required_argument,      0, 'b'},
-                       {"entries",             required_argument,      0, 'e'},
+                       {"entries",             required_argument,      0, 'E'},
                        {"cpus",                required_argument,      0, 'c'},
                        {"debug",               no_argument,            0, 'D'},
                        {"duration",            required_argument,      0, 'd'},
@@ -507,7 +507,7 @@ static struct osnoise_hist_params
                /* getopt_long stores the option index here. */
                int option_index = 0;
 
-               c = getopt_long(argc, argv, "c:b:d:e:Dhp:P:r:s:S:t::0123",
+               c = getopt_long(argc, argv, "c:b:d:E:Dhp:P:r:s:S:t::0123",
                                 long_options, &option_index);
 
                /* detect the end of the options. */
@@ -534,7 +534,7 @@ static struct osnoise_hist_params
                        if (!params->duration)
                                osnoise_hist_usage("Invalid -D duration\n");
                        break;
-               case 'e':
+               case 'E':
                        params->entries = get_llong_from_str(optarg);
                        if ((params->entries < 10) || (params->entries > 9999999))
                                osnoise_hist_usage("Entries must be > 10 and < 9999999\n");
@@ -701,9 +701,9 @@ osnoise_hist_set_signals(struct osnoise_hist_params *params)
 int osnoise_hist_main(int argc, char *argv[])
 {
        struct osnoise_hist_params *params;
+       struct osnoise_tool *record = NULL;
+       struct osnoise_tool *tool = NULL;
        struct trace_instance *trace;
-       struct osnoise_tool *record;
-       struct osnoise_tool *tool;
        int return_value = 1;
        int retval;
 
@@ -792,9 +792,8 @@ int osnoise_hist_main(int argc, char *argv[])
 out_hist:
        osnoise_free_histogram(tool->data);
 out_destroy:
+       osnoise_destroy_tool(record);
        osnoise_destroy_tool(tool);
-       if (params->trace_output)
-               osnoise_destroy_tool(record);
        free(params);
 out_exit:
        exit(return_value);
index 332b2ac..7af769b 100644 (file)
@@ -483,9 +483,9 @@ static void osnoise_top_set_signals(struct osnoise_top_params *params)
 int osnoise_top_main(int argc, char **argv)
 {
        struct osnoise_top_params *params;
+       struct osnoise_tool *record = NULL;
+       struct osnoise_tool *tool = NULL;
        struct trace_instance *trace;
-       struct osnoise_tool *record;
-       struct osnoise_tool *tool;
        int return_value = 1;
        int retval;
 
@@ -571,9 +571,9 @@ int osnoise_top_main(int argc, char **argv)
 
 out_top:
        osnoise_free_top(tool->data);
+       osnoise_destroy_tool(record);
        osnoise_destroy_tool(tool);
-       if (params->trace_output)
-               osnoise_destroy_tool(record);
+       free(params);
 out_exit:
        exit(return_value);
 }
index 235f962..237e173 100644 (file)
@@ -429,7 +429,7 @@ static void timerlat_hist_usage(char *usage)
        char *msg[] = {
                "",
                "  usage: [rtla] timerlat hist [-h] [-q] [-d s] [-D] [-n] [-p us] [-i us] [-T us] [-s us] [-t[=file]] \\",
-               "         [-c cpu-list] [-P priority] [-e N] [-b N]  [--no-irq] [--no-thread] [--no-header] [--no-summary] \\",
+               "         [-c cpu-list] [-P priority] [-E N] [-b N]  [--no-irq] [--no-thread] [--no-header] [--no-summary] \\",
                "         [--no-index] [--with-zeros]",
                "",
                "         -h/--help: print this menu",
@@ -443,7 +443,7 @@ static void timerlat_hist_usage(char *usage)
                "         -T/--trace[=file]: save the stopped trace to [file|timerlat_trace.txt]",
                "         -n/--nano: display data in nanoseconds",
                "         -b/--bucket-size N: set the histogram bucket size (default 1)",
-               "         -e/--entries N: set the number of entries of the histogram (default 256)",
+               "         -E/--entries N: set the number of entries of the histogram (default 256)",
                "            --no-irq: ignore IRQ latencies",
                "            --no-thread: ignore thread latencies",
                "            --no-header: do not print header",
@@ -494,7 +494,7 @@ static struct timerlat_hist_params
                        {"cpus",                required_argument,      0, 'c'},
                        {"bucket-size",         required_argument,      0, 'b'},
                        {"debug",               no_argument,            0, 'D'},
-                       {"entries",             required_argument,      0, 'e'},
+                       {"entries",             required_argument,      0, 'E'},
                        {"duration",            required_argument,      0, 'd'},
                        {"help",                no_argument,            0, 'h'},
                        {"irq",                 required_argument,      0, 'i'},
@@ -516,7 +516,7 @@ static struct timerlat_hist_params
                /* getopt_long stores the option index here. */
                int option_index = 0;
 
-               c = getopt_long(argc, argv, "c:b:d:e:Dhi:np:P:s:t::T:012345",
+               c = getopt_long(argc, argv, "c:b:d:E:Dhi:np:P:s:t::T:012345",
                                 long_options, &option_index);
 
                /* detect the end of the options. */
@@ -543,7 +543,7 @@ static struct timerlat_hist_params
                        if (!params->duration)
                                timerlat_hist_usage("Invalid -D duration\n");
                        break;
-               case 'e':
+               case 'E':
                        params->entries = get_llong_from_str(optarg);
                        if ((params->entries < 10) || (params->entries > 9999999))
                                        timerlat_hist_usage("Entries must be > 10 and < 9999999\n");
@@ -729,9 +729,9 @@ timerlat_hist_set_signals(struct timerlat_hist_params *params)
 int timerlat_hist_main(int argc, char *argv[])
 {
        struct timerlat_hist_params *params;
+       struct osnoise_tool *record = NULL;
+       struct osnoise_tool *tool = NULL;
        struct trace_instance *trace;
-       struct osnoise_tool *record;
-       struct osnoise_tool *tool;
        int return_value = 1;
        int retval;
 
@@ -813,9 +813,8 @@ int timerlat_hist_main(int argc, char *argv[])
 
 out_hist:
        timerlat_free_histogram(tool->data);
+       osnoise_destroy_tool(record);
        osnoise_destroy_tool(tool);
-       if (params->trace_output)
-               osnoise_destroy_tool(record);
        free(params);
 out_exit:
        exit(return_value);
index 1ebd529..d4187f6 100644 (file)
@@ -521,9 +521,9 @@ timerlat_top_set_signals(struct timerlat_top_params *params)
 int timerlat_top_main(int argc, char *argv[])
 {
        struct timerlat_top_params *params;
+       struct osnoise_tool *record = NULL;
+       struct osnoise_tool *top = NULL;
        struct trace_instance *trace;
-       struct osnoise_tool *record;
-       struct osnoise_tool *top;
        int return_value = 1;
        int retval;
 
@@ -609,9 +609,8 @@ int timerlat_top_main(int argc, char *argv[])
 
 out_top:
        timerlat_free_top(top->data);
+       osnoise_destroy_tool(record);
        osnoise_destroy_tool(top);
-       if (params->trace_output)
-               osnoise_destroy_tool(record);
        free(params);
 out_exit:
        exit(return_value);
index 107a0c6..83de259 100644 (file)
@@ -20,14 +20,14 @@ int enable_tracer_by_name(struct tracefs_instance *inst, const char *tracer_name
 
        tracer = TRACEFS_TRACER_CUSTOM;
 
-       debug_msg("enabling %s tracer\n", tracer_name);
+       debug_msg("Enabling %s tracer\n", tracer_name);
 
        retval = tracefs_tracer_set(inst, tracer, tracer_name);
        if (retval < 0) {
                if (errno == ENODEV)
-                       err_msg("tracer %s not found!\n", tracer_name);
+                       err_msg("Tracer %s not found!\n", tracer_name);
 
-               err_msg("failed to enable the tracer %s\n", tracer_name);
+               err_msg("Failed to enable the %s tracer\n", tracer_name);
                return -1;
        }
 
@@ -44,7 +44,7 @@ void disable_tracer(struct tracefs_instance *inst)
 
        retval = tracefs_tracer_set(inst, t);
        if (retval < 0)
-               err_msg("oops, error disabling tracer\n");
+               err_msg("Oops, error disabling tracer\n");
 }
 
 /*
index 1c9f0ee..ffaf8ec 100644 (file)
@@ -77,11 +77,11 @@ void get_duration(time_t start_time, char *output, int output_size)
        time_t duration;
 
        duration = difftime(now, start_time);
-       tm_info = localtime(&duration);
+       tm_info = gmtime(&duration);
 
        snprintf(output, output_size, "%3d %02d:%02d:%02d",
                        tm_info->tm_yday,
-                       tm_info->tm_hour - 1,
+                       tm_info->tm_hour,
                        tm_info->tm_min,
                        tm_info->tm_sec);
 }
diff --git a/tools/virtio/linux/mm_types.h b/tools/virtio/linux/mm_types.h
new file mode 100644 (file)
index 0000000..356ba4d
--- /dev/null
@@ -0,0 +1,3 @@
+struct folio {
+       struct page page;
+};
index cb3f29c..23f142a 100644 (file)
@@ -130,6 +130,7 @@ static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
        memset(dev, 0, sizeof *dev);
        dev->vdev.features = features;
        INIT_LIST_HEAD(&dev->vdev.vqs);
+       spin_lock_init(&dev->vdev.vqs_list_lock);
        dev->buf_size = 1024;
        dev->buf = malloc(dev->buf_size);
        assert(dev->buf);
index 58d31da..0afc016 100644 (file)
@@ -5528,9 +5528,7 @@ static int kvm_suspend(void)
 static void kvm_resume(void)
 {
        if (kvm_usage_count) {
-#ifdef CONFIG_LOCKDEP
-               WARN_ON(lockdep_is_held(&kvm_count_lock));
-#endif
+               lockdep_assert_not_held(&kvm_count_lock);
                hardware_enable_nolock(NULL);
        }
 }