Merge remote-tracking branch 'torvalds/master' into perf/core
author Arnaldo Carvalho de Melo <acme@redhat.com>
Tue, 22 Jun 2021 16:56:50 +0000 (13:56 -0300)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Tue, 22 Jun 2021 16:56:50 +0000 (13:56 -0300)
To pick up fixes, since perf/urgent is already upstream.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
725 files changed:
.clang-format
.mailmap
Documentation/devicetree/bindings/connector/usb-connector.yaml
Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
Documentation/devicetree/bindings/media/renesas,drif.yaml
Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
Documentation/riscv/vm-layout.rst
Documentation/virt/kvm/mmu.rst
Documentation/vm/slub.rst
MAINTAINERS
Makefile
arch/arc/include/uapi/asm/sigcontext.h
arch/arc/kernel/signal.c
arch/arc/kernel/vmlinux.lds.S
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
arch/arm/boot/dts/imx6q-dhcom-som.dtsi
arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
arch/arm/boot/dts/imx7d-meerkat96.dts
arch/arm/boot/dts/imx7d-pico.dtsi
arch/arm/include/asm/cpuidle.h
arch/arm/kernel/setup.c
arch/arm/mach-imx/pm-imx27.c
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/board-h2.c
arch/arm/mach-omap1/pm.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
arch/arm64/boot/dts/ti/k3-am64-main.dtsi
arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
arch/arm64/boot/dts/ti/k3-am65-main.dtsi
arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
arch/arm64/boot/dts/ti/k3-am654-base-board.dts
arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
arch/mips/mm/cache.c
arch/powerpc/include/asm/jump_label.h
arch/powerpc/include/asm/pte-walk.h
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/io-workarounds.c
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/mm/mem.c
arch/powerpc/perf/core-book3s.c
arch/riscv/Kconfig
arch/riscv/Kconfig.socs
arch/riscv/Makefile
arch/riscv/boot/dts/microchip/Makefile
arch/riscv/boot/dts/sifive/Makefile
arch/riscv/boot/dts/sifive/fu740-c000.dtsi
arch/riscv/errata/sifive/Makefile
arch/riscv/include/asm/alternative-macros.h
arch/riscv/include/asm/pgtable.h
arch/riscv/kernel/setup.c
arch/riscv/kernel/traps.c
arch/riscv/kernel/vmlinux-xip.lds.S
arch/riscv/mm/init.c
arch/riscv/mm/kasan_init.c
arch/s390/kernel/entry.S
arch/x86/Makefile
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/apic.h
arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/fpu/api.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/thermal.h
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/cpu/sgx/virt.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/setup.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/fault.c
arch/x86/mm/ioremap.c
arch/x86/mm/mem_encrypt_identity.c
arch/x86/mm/numa.c
arch/x86/pci/fixup.c
arch/x86/platform/efi/quirks.c
arch/x86/realmode/init.c
crypto/async_tx/async_xor.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/bus.c
drivers/acpi/sleep.c
drivers/base/memory.c
drivers/block/loop.c
drivers/block/loop.h
drivers/bluetooth/btusb.c
drivers/bus/mhi/pci_generic.c
drivers/bus/ti-sysc.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/cppc_cpufreq.c
drivers/dma/Kconfig
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
drivers/dma/idxd/cdev.c
drivers/dma/idxd/init.c
drivers/dma/ipu/ipu_irq.c
drivers/dma/mediatek/mtk-uart-apdma.c
drivers/dma/pl330.c
drivers/dma/qcom/Kconfig
drivers/dma/sf-pdma/Kconfig
drivers/dma/sh/rcar-dmac.c
drivers/dma/ste_dma40.c
drivers/dma/stm32-mdma.c
drivers/dma/xilinx/xilinx_dpdma.c
drivers/dma/xilinx/zynqmp_dma.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/fdtparams.c
drivers/firmware/efi/libstub/file.c
drivers/firmware/efi/memattr.c
drivers/gpio/gpio-wcd934x.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_mm.c
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/mcde/mcde_dsi.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/host1x/bus.c
drivers/hid/Kconfig
drivers/hid/Makefile
drivers/hid/amd-sfh-hid/amd_sfh_client.c
drivers/hid/amd-sfh-hid/amd_sfh_hid.c
drivers/hid/hid-a4tech.c
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-ft260.c
drivers/hid/hid-gt683r.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-quirks.c
drivers/hid/hid-semitek.c [new file with mode: 0644]
drivers/hid/hid-sensor-custom.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-thrustmaster.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/surface-hid/surface_hid_core.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-pidff.c
drivers/hwmon/corsair-psu.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/pmbus/fsp-3y.c
drivers/hwmon/pmbus/isl68137.c
drivers/hwmon/pmbus/q54sj108a2.c
drivers/hwmon/scpi-hwmon.c
drivers/hwmon/tps23861.c
drivers/i2c/busses/i2c-altera.c
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/busses/i2c-tegra-bpmp.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/doorbell.c
drivers/infiniband/hw/mlx5/fs.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/ulp/ipoib/ipoib_netlink.c
drivers/irqchip/irq-gic-v3.c
drivers/md/bcache/bcache.h
drivers/md/bcache/request.c
drivers/md/bcache/stats.c
drivers/md/bcache/stats.h
drivers/md/bcache/sysfs.c
drivers/misc/cardreader/rtl8411.c
drivers/misc/cardreader/rts5209.c
drivers/misc/cardreader/rts5227.c
drivers/misc/cardreader/rts5228.c
drivers/misc/cardreader/rts5229.c
drivers/misc/cardreader/rts5249.c
drivers/misc/cardreader/rts5260.c
drivers/misc/cardreader/rts5261.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/net/caif/caif_serial.c
drivers/net/can/usb/mcba_usb.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/ec_bhf.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/lantiq_xrx200.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/hamradio/mkiss.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/mhi/net.c
drivers/net/phy/dp83867.c
drivers/net/usb/cdc_eem.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc75xx.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/wireguard/Makefile
drivers/net/wireguard/allowedips.c
drivers/net/wireguard/allowedips.h
drivers/net/wireguard/main.c
drivers/net/wireguard/peer.c
drivers/net/wireguard/peer.h
drivers/net/wireguard/selftest/allowedips.c
drivers/net/wireguard/socket.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt7615/init.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
drivers/net/wireless/mediatek/mt76/mt7921/init.c
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
drivers/net/wireless/mediatek/mt76/mt7921/main.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/net/xen-netback/interface.c
drivers/nvme/host/rdma.c
drivers/nvme/target/core.c
drivers/nvme/target/loop.c
drivers/pci/controller/dwc/Makefile
drivers/pci/controller/dwc/pcie-tegra194-acpi.c [new file with mode: 0644]
drivers/pci/controller/dwc/pcie-tegra194.c
drivers/pci/controller/pci-aardvark.c
drivers/pci/of.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/phy/broadcom/phy-brcm-usb-init.h
drivers/phy/cadence/phy-cadence-sierra.c
drivers/phy/mediatek/phy-mtk-tphy.c
drivers/phy/microchip/sparx5_serdes.c
drivers/phy/ralink/phy-mt7621-pci.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/aspeed/pinctrl-aspeed.c
drivers/pinctrl/aspeed/pinmux-aspeed.c
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/pinctrl-sdx55.c
drivers/pinctrl/ralink/pinctrl-rt2880.c
drivers/platform/mellanox/mlxreg-hotplug.c
drivers/platform/surface/aggregator/controller.c
drivers/platform/surface/surface_aggregator_registry.c
drivers/platform/surface/surface_dtx.c
drivers/platform/x86/thinkpad_acpi.c
drivers/ptp/ptp_clock.c
drivers/regulator/Kconfig
drivers/regulator/atc260x-regulator.c
drivers/regulator/bd718x7-regulator.c
drivers/regulator/core.c
drivers/regulator/cros-ec-regulator.c
drivers/regulator/da9121-regulator.c
drivers/regulator/fan53555.c
drivers/regulator/fan53880.c
drivers/regulator/fixed.c
drivers/regulator/helpers.c
drivers/regulator/hi6421v600-regulator.c
drivers/regulator/hi655x-regulator.c
drivers/regulator/max77620-regulator.c
drivers/regulator/mt6315-regulator.c
drivers/regulator/rt4801-regulator.c
drivers/regulator/rtmv20-regulator.c
drivers/regulator/scmi-regulator.c
drivers/s390/crypto/ap_queue.c
drivers/scsi/hosts.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/ufs/ufs-mediatek.c
drivers/soc/amlogic/meson-clk-measure.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bitbang.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-omap-uwire.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-stm32-qspi.c
drivers/spi/spi-zynq-qspi.c
drivers/staging/ralink-gdma/ralink-gdma.c
drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
drivers/target/target_core_transport.c
drivers/tee/optee/call.c
drivers/tee/optee/optee_msg.h
drivers/thermal/intel/therm_throt.c
drivers/tty/serial/8250/8250_exar.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/cdns3/cdnsp-ring.c
drivers/usb/chipidea/usbmisc_imx.c
drivers/usb/core/hub.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/debug.h
drivers/usb/dwc3/debugfs.c
drivers/usb/dwc3/dwc3-meson-g12a.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/config.c
drivers/usb/gadget/function/f_ecm.c
drivers/usb/gadget/function/f_eem.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/f_loopback.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/f_serial.c
drivers/usb/gadget/function/f_sourcesink.c
drivers/usb/gadget/function/f_subset.c
drivers/usb/gadget/function/f_tcm.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.h
drivers/usb/misc/brcmstb-usb-pinmap.c
drivers/usb/musb/musb_core.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/omninet.c
drivers/usb/serial/quatech2.c
drivers/usb/typec/mux.c
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tcpm/wcove.c
drivers/usb/typec/ucsi/ucsi.c
drivers/vfio/pci/Kconfig
drivers/vfio/pci/vfio_pci_config.c
drivers/vfio/platform/vfio_platform_common.c
drivers/vfio/vfio_iommu_type1.c
drivers/video/fbdev/core/fb_defio.c
drivers/video/fbdev/core/fbmem.c
fs/afs/main.c
fs/afs/write.c
fs/btrfs/block-group.c
fs/btrfs/compression.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/reflink.c
fs/btrfs/tree-log.c
fs/btrfs/zoned.c
fs/coredump.c
fs/debugfs/file.c
fs/ext4/extents.c
fs/ext4/fast_commit.c
fs/ext4/fast_commit.h
fs/ext4/ialloc.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/ext4/super.c
fs/ext4/sysfs.c
fs/gfs2/file.c
fs/hugetlbfs/inode.c
fs/io_uring.c
fs/nfs/client.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfstrace.h
fs/notify/fanotify/fanotify_user.c
fs/ocfs2/file.c
fs/proc/base.c
include/asm-generic/vmlinux.lds.h
include/dt-bindings/usb/pd.h
include/linux/arch_topology.h
include/linux/avf/virtchnl.h
include/linux/compiler_attributes.h
include/linux/entry-kvm.h
include/linux/fb.h
include/linux/hid.h
include/linux/host1x.h
include/linux/huge_mm.h
include/linux/hugetlb.h
include/linux/kvm_host.h
include/linux/mfd/rohm-bd70528.h
include/linux/mfd/rohm-bd71828.h
include/linux/mlx4/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/transobj.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/pci.h
include/linux/pgtable.h
include/linux/platform_data/ti-sysc.h
include/linux/ptp_clock_kernel.h
include/linux/rmap.h
include/linux/rtsx_pci.h
include/linux/sched.h
include/linux/socket.h
include/linux/swapops.h
include/linux/tick.h
include/linux/usb/pd.h
include/linux/usb/pd_ext_sdb.h
include/net/caif/caif_dev.h
include/net/caif/cfcnfg.h
include/net/caif/cfserl.h
include/net/mac80211.h
include/net/net_namespace.h
include/net/netfilter/nf_tables.h
include/net/sock.h
include/net/tls.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/in.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/io_uring.h
include/uapi/linux/virtio_ids.h
init/main.c
kernel/bpf/helpers.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/crash_core.c
kernel/entry/common.c
kernel/events/core.c
kernel/irq_work.c
kernel/printk/printk_safe.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/pelt.h
kernel/time/tick-sched.c
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_clock.c
lib/crc64.c
mm/debug_vm_pgtable.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/kasan/init.c
mm/kfence/core.c
mm/memory-failure.c
mm/memory.c
mm/migrate.c
mm/page_alloc.c
mm/page_vma_mapped.c
mm/pgtable-generic.c
mm/rmap.c
mm/slab_common.c
mm/slub.c
mm/sparse.c
mm/swapfile.c
mm/truncate.c
net/appletalk/aarp.c
net/batman-adv/bat_iv_ogm.c
net/bluetooth/hci_core.c
net/bluetooth/hci_sock.c
net/bluetooth/smp.c
net/bridge/br_private.h
net/bridge/br_vlan_tunnel.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/caif/cfcnfg.c
net/caif/cfserl.c
net/can/bcm.c
net/can/isotp.c
net/can/j1939/transport.c
net/can/raw.c
net/compat.c
net/core/devlink.c
net/core/fib_rules.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dsa/tag_8021q.c
net/ethtool/eeprom.c
net/ethtool/ioctl.c
net/ethtool/strset.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ieee802154/nl802154.c
net/ipv4/af_inet.c
net/ipv4/cipso_ipv4.c
net/ipv4/devinet.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/ipconfig.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/udp.c
net/mac80211/debugfs.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mlme.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/tx.c
net/mac80211/util.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_ct.c
net/nfc/llcp_sock.c
net/packet/af_packet.c
net/qrtr/qrtr.c
net/rds/recv.c
net/sched/act_ct.c
net/sched/sch_cake.c
net/sched/sch_htb.c
net/socket.c
net/tls/tls_device.c
net/tls/tls_device_fallback.c
net/tls/tls_main.c
net/unix/af_unix.c
net/wireless/Makefile
net/wireless/core.c
net/wireless/pmsr.c
net/wireless/sysfs.c
net/wireless/util.c
net/x25/af_x25.c
samples/vfio-mdev/mdpy-fb.c
scripts/Makefile.modfinal
scripts/link-vmlinux.sh
scripts/recordmcount.h
sound/core/control_led.c
sound/core/seq/seq_timer.c
sound/core/timer.c
sound/firewire/amdtp-stream.c
sound/hda/intel-dsp-config.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/rt5659.c
sound/soc/codecs/rt5682-sdw.c
sound/soc/codecs/tas2562.h
sound/soc/fsl/fsl-asoc-card.c
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/lpass.h
sound/soc/soc-core.c
sound/soc/soc-topology.c
sound/soc/sof/pm.c
tools/arch/mips/include/uapi/asm/perf_regs.h [new file with mode: 0644]
tools/arch/x86/include/asm/disabled-features.h
tools/bootconfig/include/linux/bootconfig.h
tools/bootconfig/main.c
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/linux/in.h
tools/lib/bpf/xsk.c
tools/objtool/arch/x86/decode.c
tools/objtool/elf.c
tools/perf/Makefile.config
tools/perf/builtin-record.c
tools/perf/check-headers.sh
tools/perf/tests/attr/base-record
tools/perf/tests/shell/stat_bpf_counters.sh
tools/perf/trace/beauty/include/linux/socket.h
tools/perf/util/bpf_counter.c
tools/perf/util/dwarf-aux.c
tools/perf/util/env.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/machine.c
tools/perf/util/metricgroup.c
tools/perf/util/perf_api_probe.c
tools/perf/util/perf_api_probe.h
tools/perf/util/probe-finder.c
tools/perf/util/session.c
tools/perf/util/stat-display.c
tools/perf/util/symbol-elf.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/verifier/and.c
tools/testing/selftests/bpf/verifier/bounds.c
tools/testing/selftests/bpf/verifier/dead_code.c
tools/testing/selftests/bpf/verifier/jmp32.c
tools/testing/selftests/bpf/verifier/jset.c
tools/testing/selftests/bpf/verifier/unpriv.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/perf_test_util.c
tools/testing/selftests/kvm/lib/test_util.c
tools/testing/selftests/kvm/memslot_perf_test.c
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/icmp.sh [new file with mode: 0755]
tools/testing/selftests/net/mptcp/mptcp_connect.sh
tools/testing/selftests/net/udpgro_fwd.sh
tools/testing/selftests/net/veth.sh
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/nft_fib.sh [new file with mode: 0755]
tools/testing/selftests/proc/.gitignore
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/wireguard/qemu/kernel.config

index c24b147..15d4eaa 100644 (file)
@@ -109,8 +109,8 @@ ForEachMacros:
   - 'css_for_each_child'
   - 'css_for_each_descendant_post'
   - 'css_for_each_descendant_pre'
-  - 'cxl_for_each_cmd'
   - 'device_for_each_child_node'
+  - 'displayid_iter_for_each'
   - 'dma_fence_chain_for_each'
   - 'do_for_each_ftrace_op'
   - 'drm_atomic_crtc_for_each_plane'
@@ -136,6 +136,7 @@ ForEachMacros:
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
   - 'flow_action_for_each'
+  - 'for_each_acpi_dev_match'
   - 'for_each_active_dev_scope'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
@@ -171,7 +172,6 @@ ForEachMacros:
   - 'for_each_dapm_widgets'
   - 'for_each_dev_addr'
   - 'for_each_dev_scope'
-  - 'for_each_displayid_db'
   - 'for_each_dma_cap_mask'
   - 'for_each_dpcm_be'
   - 'for_each_dpcm_be_rollback'
@@ -179,6 +179,7 @@ ForEachMacros:
   - 'for_each_dpcm_fe'
   - 'for_each_drhd_unit'
   - 'for_each_dss_dev'
+  - 'for_each_dtpm_table'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
   - 'for_each_element'
@@ -215,6 +216,7 @@ ForEachMacros:
   - 'for_each_migratetype_order'
   - 'for_each_msi_entry'
   - 'for_each_msi_entry_safe'
+  - 'for_each_msi_vector'
   - 'for_each_net'
   - 'for_each_net_continue_reverse'
   - 'for_each_netdev'
@@ -270,6 +272,12 @@ ForEachMacros:
   - 'for_each_prime_number_from'
   - 'for_each_process'
   - 'for_each_process_thread'
+  - 'for_each_prop_codec_conf'
+  - 'for_each_prop_dai_codec'
+  - 'for_each_prop_dai_cpu'
+  - 'for_each_prop_dlc_codecs'
+  - 'for_each_prop_dlc_cpus'
+  - 'for_each_prop_dlc_platforms'
   - 'for_each_property_of_node'
   - 'for_each_registered_fb'
   - 'for_each_requested_gpio'
@@ -430,6 +438,7 @@ ForEachMacros:
   - 'queue_for_each_hw_ctx'
   - 'radix_tree_for_each_slot'
   - 'radix_tree_for_each_tagged'
+  - 'rb_for_each'
   - 'rbtree_postorder_for_each_entry_safe'
   - 'rdma_for_each_block'
   - 'rdma_for_each_port'
index ce6c497..c79a787 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -243,6 +243,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
+Michel Lespinasse <michel@lespinasse.org>
+Michel Lespinasse <michel@lespinasse.org> <walken@google.com>
+Michel Lespinasse <michel@lespinasse.org> <walken@zoy.org>
 Miguel Ojeda <ojeda@kernel.org> <miguel.ojeda.sandonis@gmail.com>
 Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
 Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
index 32509b9..92b49bc 100644 (file)
@@ -149,6 +149,17 @@ properties:
     maxItems: 6
     $ref: /schemas/types.yaml#/definitions/uint32-array
 
+  sink-vdos-v1:
+    description: An array of u32 with each entry, a Vendor Defined Message Object (VDO),
+      providing additional information corresponding to the product, the detailed bit
+      definitions and the order of each VDO can be found in
+      "USB Power Delivery Specification Revision 2.0, Version 1.3" chapter 6.4.4.3.1 Discover
+      Identity. User can specify the VDO array via VDO_IDH/_CERT/_PRODUCT/_CABLE/_AMA defined in
+      dt-bindings/usb/pd.h.
+    minItems: 3
+    maxItems: 6
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+
   op-sink-microwatt:
     description: Sink required operating power in microwatt, if source can't
       offer the power, Capability Mismatch is set. Required for power sink and
@@ -207,6 +218,10 @@ properties:
       SNK_READY for non-pd link.
     type: boolean
 
+dependencies:
+  sink-vdos-v1: [ 'sink-vdos' ]
+  sink-vdos: [ 'sink-vdos-v1' ]
+
 required:
   - compatible
 
index 33ee575..926be9a 100644 (file)
@@ -49,7 +49,7 @@ examples:
         #size-cells = <0>;
 
         adc@48 {
-            comatible = "ti,ads7828";
+            compatible = "ti,ads7828";
             reg = <0x48>;
             vref-supply = <&vref>;
             ti,differential-input;
index ce505a7..9cd56ff 100644 (file)
@@ -67,9 +67,7 @@ properties:
     maxItems: 1
 
   clock-names:
-    maxItems: 1
-    items:
-      - const: fck
+    const: fck
 
   resets:
     maxItems: 1
index db61f07..2e35aea 100644 (file)
@@ -57,7 +57,7 @@ patternProperties:
           rate
 
       sound-dai:
-        $ref: /schemas/types.yaml#/definitions/phandle
+        $ref: /schemas/types.yaml#/definitions/phandle-array
         description: phandle of the CPU DAI
 
     patternProperties:
@@ -71,7 +71,7 @@ patternProperties:
 
         properties:
           sound-dai:
-            $ref: /schemas/types.yaml#/definitions/phandle
+            $ref: /schemas/types.yaml#/definitions/phandle-array
             description: phandle of the codec DAI
 
         required:
index 329d320..b7f9893 100644 (file)
@@ -58,6 +58,6 @@ RISC-V Linux Kernel SV39
                                                               |
   ____________________________________________________________|____________________________________________________________
                     |            |                  |         |
-   ffffffff00000000 |   -4    GB | ffffffff7fffffff |    2 GB | modules
-   ffffffff80000000 |   -2    GB | ffffffffffffffff |    2 GB | kernel, BPF
+   ffffffff00000000 |   -4    GB | ffffffff7fffffff |    2 GB | modules, BPF
+   ffffffff80000000 |   -2    GB | ffffffffffffffff |    2 GB | kernel
   __________________|____________|__________________|_________|____________________________________________________________
index 5bfe28b..20d85da 100644 (file)
@@ -171,8 +171,8 @@ Shadow pages contain the following information:
     shadow pages) so role.quadrant takes values in the range 0..3.  Each
     quadrant maps 1GB virtual address space.
   role.access:
-    Inherited guest access permissions in the form uwx.  Note execute
-    permission is positive, not negative.
+    Inherited guest access permissions from the parent ptes in the form uwx.
+    Note execute permission is positive, not negative.
   role.invalid:
     The page is invalid and should not be used.  It is a root page that is
     currently pinned (by a cpu hardware register pointing to it); once it is
index 03f294a..d302855 100644 (file)
@@ -181,7 +181,7 @@ SLUB Debug output
 Here is a sample of slub debug output::
 
  ====================================================================
- BUG kmalloc-8: Redzone overwritten
+ BUG kmalloc-8: Right Redzone overwritten
  --------------------------------------------------------------------
 
  INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
  INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
  INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
 
- Bytes b4 0xc90f6d10:  00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
  Object 0xc90f6d20:  31 30 31 39 2e 30 30 35                         1019.005
 Redzone 0xc90f6d28:  00 cc cc cc                                     .
 Padding 0xc90f6d50:  5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
+ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
Object   (0xc90f6d20): 31 30 31 39 2e 30 30 35                         1019.005
Redzone  (0xc90f6d28): 00 cc cc cc                                     .
Padding  (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
 
    [<c010523d>] dump_trace+0x63/0x1eb
    [<c01053df>] show_trace_log_lvl+0x1a/0x2f
index 673cadd..8c5ee00 100644 (file)
@@ -3877,6 +3877,7 @@ L:        linux-btrfs@vger.kernel.org
 S:     Maintained
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
+C:     irc://irc.libera.chat/btrfs
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
 F:     Documentation/filesystems/btrfs.rst
 F:     fs/btrfs/
@@ -12904,7 +12905,7 @@ F:      net/ipv4/nexthop.c
 
 NFC SUBSYSTEM
 M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/nfc/
@@ -12917,7 +12918,7 @@ F:      net/nfc/
 NFC VIRTUAL NCI DEVICE DRIVER
 M:     Bongsu Jeon <bongsu.jeon@samsung.com>
 L:     netdev@vger.kernel.org
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     drivers/nfc/virtual_ncidev.c
 F:     tools/testing/selftests/nci/
@@ -13215,7 +13216,7 @@ F:      sound/soc/codecs/tfa9879*
 
 NXP-NCI NFC DRIVER
 R:     Charles Gorand <charles.gorand@effinnov.com>
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     drivers/nfc/nxp-nci
 
@@ -14118,6 +14119,7 @@ F:      drivers/pci/controller/pci-v3-semi.c
 PCI ENDPOINT SUBSYSTEM
 M:     Kishon Vijay Abraham I <kishon@ti.com>
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     Documentation/PCI/endpoint/*
@@ -14166,6 +14168,7 @@ F:      drivers/pci/controller/pci-xgene-msi.c
 PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 R:     Rob Herring <robh@kernel.org>
+R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 Q:     http://patchwork.ozlabs.org/project/linux-pci/list/
@@ -16144,7 +16147,7 @@ F:      include/media/drv-intf/s3c_camif.h
 SAMSUNG S3FWRN5 NFC DRIVER
 M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
 M:     Krzysztof Opasiak <k.opasiak@samsung.com>
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
 F:     drivers/nfc/s3fwrn5
@@ -16557,6 +16560,7 @@ F:      drivers/misc/sgi-xp/
 
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 M:     Karsten Graul <kgraul@linux.ibm.com>
+M:     Guvenc Gulce <guvenc@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -18334,7 +18338,7 @@ F:      sound/soc/codecs/tas571x*
 TI TRF7970A NFC DRIVER
 M:     Mark Greer <mgreer@animalcreek.com>
 L:     linux-wireless@vger.kernel.org
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     Documentation/devicetree/bindings/net/nfc/trf7970a.txt
 F:     drivers/nfc/trf7970a.c
@@ -18870,6 +18874,13 @@ S:     Maintained
 F:     drivers/usb/host/isp116x*
 F:     include/linux/usb/isp116x.h
 
+USB ISP1760 DRIVER
+M:     Rui Miguel Silva <rui.silva@linaro.org>
+L:     linux-usb@vger.kernel.org
+S:     Maintained
+F:     drivers/usb/isp1760/*
+F:     Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
+
 USB LAN78XX ETHERNET DRIVER
 M:     Woojung Huh <woojung.huh@microchip.com>
 M:     UNGLinuxDriver@microchip.com
index b79e0e8..3e8dbe6 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
-NAME = Frozen Wasteland
+EXTRAVERSION = -rc7
+NAME = Opossums on Parade
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -929,11 +929,14 @@ CC_FLAGS_LTO      += -fvisibility=hidden
 # Limit inlining across translation units to reduce binary size
 KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
 
-# Check for frame size exceeding threshold during prolog/epilog insertion.
+# Check for frame size exceeding threshold during prolog/epilog insertion
+# when using lld < 13.0.0.
 ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
 KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
 endif
 endif
+endif
 
 ifdef CONFIG_LTO
 KBUILD_CFLAGS  += -fno-lto $(CC_FLAGS_LTO)
index 95f8a43..7a5449d 100644 (file)
@@ -18,6 +18,7 @@
  */
 struct sigcontext {
        struct user_regs_struct regs;
+       struct user_regs_arcv2 v2abi;
 };
 
 #endif /* _ASM_ARC_SIGCONTEXT_H */
index b3ccb9e..cb2f885 100644 (file)
@@ -61,6 +61,41 @@ struct rt_sigframe {
        unsigned int sigret_magic;
 };
 
+static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+       int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+       struct user_regs_arcv2 v2abi;
+
+       v2abi.r30 = regs->r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       v2abi.r58 = regs->r58;
+       v2abi.r59 = regs->r59;
+#else
+       v2abi.r58 = v2abi.r59 = 0;
+#endif
+       err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+#endif
+       return err;
+}
+
+static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+       int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+       struct user_regs_arcv2 v2abi;
+
+       err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
+
+       regs->r30 = v2abi.r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       regs->r58 = v2abi.r58;
+       regs->r59 = v2abi.r59;
+#endif
+#endif
+       return err;
+}
+
 static int
 stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
               sigset_t *set)
@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
 
        err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
                             sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+       if (is_isa_arcv2())
+               err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
        return err ? -EFAULT : 0;
@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
        err |= __copy_from_user(&uregs.scratch,
                                &(sf->uc.uc_mcontext.regs.scratch),
                                sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+       if (is_isa_arcv2())
+               err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
        if (err)
                return -EFAULT;
 
index 33ce59d..e2146a8 100644 (file)
@@ -57,7 +57,6 @@ SECTIONS
        .init.ramfs : { INIT_RAM_FS }
 
        . = ALIGN(PAGE_SIZE);
-       _stext = .;
 
        HEAD_TEXT_SECTION
        INIT_TEXT_SECTION(L1_CACHE_BYTES)
@@ -83,6 +82,7 @@ SECTIONS
 
        .text : {
                _text = .;
+               _stext = .;
                TEXT_TEXT
                SCHED_TEXT
                CPUIDLE_TEXT
index 7d2c725..9148a01 100644 (file)
        phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
        phy-reset-duration = <20>;
        phy-supply = <&sw2_reg>;
-       phy-handle = <&ethphy0>;
        status = "okay";
 
+       fixed-link {
+               speed = <1000>;
+               full-duplex;
+       };
+
        mdio {
                #address-cells = <1>;
                #size-cells = <0>;
index 236fc20..d0768ae 100644 (file)
        vin-supply = <&sw1_reg>;
 };
 
+&reg_pu {
+       vin-supply = <&sw1_reg>;
+};
+
+&reg_vdd1p1 {
+       vin-supply = <&sw2_reg>;
+};
+
+&reg_vdd2p5 {
+       vin-supply = <&sw2_reg>;
+};
+
 &uart1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_uart1>;
index 828cf3e..c4e146f 100644 (file)
                compatible = "nxp,pca8574";
                reg = <0x3a>;
                gpio-controller;
-               #gpio-cells = <1>;
+               #gpio-cells = <2>;
        };
 };
 
index 5339210..dd8003b 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
        keep-power-in-suspend;
-       tuning-step = <2>;
+       fsl,tuning-step = <2>;
        vmmc-supply = <&reg_3p3v>;
        no-1-8-v;
        broken-cd;
index e57da0d..e519897 100644 (file)
        pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
        cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
        bus-width = <4>;
-       tuning-step = <2>;
+       fsl,tuning-step = <2>;
        vmmc-supply = <&reg_3p3v>;
        wakeup-source;
        no-1-8-v;
index 0d67ed6..bc4ffa7 100644 (file)
@@ -7,9 +7,11 @@
 #ifdef CONFIG_CPU_IDLE
 extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
 #else
 static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
 #endif
 
 /* Common ARM WFI state */
@@ -42,8 +44,7 @@ struct of_cpuidle_method {
 
 #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops)                 \
        static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
-       __used __section("__cpuidle_method_of_table")                   \
-       = { .method = _method, .ops = _ops }
+       __cpuidle_method_section = { .method = _method, .ops = _ops }
 
 extern int arm_cpuidle_suspend(int index);
 
index 1a5edf5..73ca779 100644 (file)
@@ -545,9 +545,11 @@ void notrace cpu_init(void)
         * In Thumb-2, msr with an immediate value is not allowed.
         */
 #ifdef CONFIG_THUMB2_KERNEL
-#define PLC    "r"
+#define PLC_l  "l"
+#define PLC_r  "r"
 #else
-#define PLC    "I"
+#define PLC_l  "I"
+#define PLC_r  "I"
 #endif
 
        /*
@@ -569,15 +571,15 @@ void notrace cpu_init(void)
        "msr    cpsr_c, %9"
            :
            : "r" (stk),
-             PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
              "I" (offsetof(struct stack, fiq[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+             PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
 #endif
 }
index 020e6de..237e8aa 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/suspend.h>
 #include <linux/io.h>
 
+#include "common.h"
 #include "hardware.h"
 
 static int mx27_suspend_enter(suspend_state_t state)
index 2ee527c..1026a81 100644 (file)
@@ -458,20 +458,6 @@ static struct gpiod_lookup_table leds_gpio_table = {
 
 #ifdef CONFIG_LEDS_TRIGGERS
 DEFINE_LED_TRIGGER(ams_delta_camera_led_trigger);
-
-static int ams_delta_camera_power(struct device *dev, int power)
-{
-       /*
-        * turn on camera LED
-        */
-       if (power)
-               led_trigger_event(ams_delta_camera_led_trigger, LED_FULL);
-       else
-               led_trigger_event(ams_delta_camera_led_trigger, LED_OFF);
-       return 0;
-}
-#else
-#define ams_delta_camera_power NULL
 #endif
 
 static struct platform_device ams_delta_audio_device = {
index c40cf5e..977b0b7 100644 (file)
@@ -320,7 +320,7 @@ static int tps_setup(struct i2c_client *client, void *context)
 {
        if (!IS_BUILTIN(CONFIG_TPS65010))
                return -ENOSYS;
-       
+
        tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V |
                                TPS_LDO1_ENABLE | TPS_VLDO1_3_0V);
 
@@ -394,6 +394,8 @@ static void __init h2_init(void)
        BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0);
        gpio_direction_input(H2_NAND_RB_GPIO_PIN);
 
+       gpiod_add_lookup_table(&isp1301_gpiod_table);
+
        omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
        omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
 
index 2c1e2b3..a745d64 100644 (file)
@@ -655,9 +655,13 @@ static int __init omap_pm_init(void)
                irq = INT_7XX_WAKE_UP_REQ;
        else if (cpu_is_omap16xx())
                irq = INT_1610_WAKE_UP_REQ;
-       if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup",
-                       NULL))
-               pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+       else
+               irq = -1;
+
+       if (irq >= 0) {
+               if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup", NULL))
+                       pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+       }
 
        /* Program new power ramp-up time
         * (0 for most boards since we don't lower voltage when in deep sleep)
index 418a61e..5e86145 100644 (file)
@@ -322,6 +322,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot)
 
 static void n8x0_mmc_callback(void *data, u8 card_mask)
 {
+#ifdef CONFIG_MMC_OMAP
        int bit, *openp, index;
 
        if (board_is_n800()) {
@@ -339,7 +340,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
        else
                *openp = 0;
 
-#ifdef CONFIG_MMC_OMAP
        omap_mmc_notify_cover_event(mmc_device, index, *openp);
 #else
        pr_warn("MMC: notify cover event not available\n");
index 6409b47..7336c1f 100644 (file)
@@ -165,6 +165,7 @@ config ARCH_MEDIATEK
 
 config ARCH_MESON
        bool "Amlogic Platforms"
+       select COMMON_CLK
        select MESON_IRQ_GPIO
        help
          This enables support for the arm64 based Amlogic SoCs
index 6c309b9..e8d3127 100644 (file)
@@ -46,7 +46,8 @@
                        eee-broken-100tx;
                        qca,clk-out-frequency = <125000000>;
                        qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
-                       vddio-supply = <&vddh>;
+                       qca,keep-pll-enabled;
+                       vddio-supply = <&vddio>;
 
                        vddio: vddio-regulator {
                                regulator-name = "VDDIO";
index df212ed..e65d1c4 100644 (file)
                        reg = <0x4>;
                        eee-broken-1000t;
                        eee-broken-100tx;
-
                        qca,clk-out-frequency = <125000000>;
                        qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
-
-                       vddio-supply = <&vddh>;
+                       qca,keep-pll-enabled;
+                       vddio-supply = <&vddio>;
 
                        vddio: vddio-regulator {
                                regulator-name = "VDDIO";
index eca06a0..a30249e 100644 (file)
                ddr: memory-controller@1080000 {
                        compatible = "fsl,qoriq-memory-controller";
                        reg = <0x0 0x1080000 0x0 0x1000>;
-                       interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
-                       big-endian;
+                       interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+                       little-endian;
                };
 
                dcfg: syscon@1e00000 {
index 631e01c..be1e7d6 100644 (file)
                pinctrl-0 = <&pinctrl_codec2>;
                reg = <0x18>;
                #sound-dai-cells = <0>;
-               HPVDD-supply = <&reg_3p3v>;
-               SPRVDD-supply = <&reg_3p3v>;
-               SPLVDD-supply = <&reg_3p3v>;
-               AVDD-supply = <&reg_3p3v>;
-               IOVDD-supply = <&reg_3p3v>;
+               HPVDD-supply = <&reg_gen_3p3>;
+               SPRVDD-supply = <&reg_gen_3p3>;
+               SPLVDD-supply = <&reg_gen_3p3>;
+               AVDD-supply = <&reg_gen_3p3>;
+               IOVDD-supply = <&reg_gen_3p3>;
                DVDD-supply = <&vgen4_reg>;
                reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
        };
index 4dc8383..a08a568 100644 (file)
@@ -45,8 +45,8 @@
        reg_12p0_main: regulator-12p0-main {
                compatible = "regulator-fixed";
                regulator-name = "12V_MAIN";
-               regulator-min-microvolt = <5000000>;
-               regulator-max-microvolt = <5000000>;
+               regulator-min-microvolt = <12000000>;
+               regulator-max-microvolt = <12000000>;
                regulator-always-on;
        };
 
                regulator-always-on;
        };
 
-       reg_3p3v: regulator-3p3v {
-               compatible = "regulator-fixed";
-               vin-supply = <&reg_3p3_main>;
-               regulator-name = "GEN_3V3";
-               regulator-min-microvolt = <3300000>;
-               regulator-max-microvolt = <3300000>;
-               regulator-always-on;
-       };
-
        reg_usdhc2_vmmc: regulator-vsd-3v3 {
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_reg_usdhc2>;
                pinctrl-0 = <&pinctrl_codec1>;
                reg = <0x18>;
                #sound-dai-cells = <0>;
-               HPVDD-supply = <&reg_3p3v>;
-               SPRVDD-supply = <&reg_3p3v>;
-               SPLVDD-supply = <&reg_3p3v>;
-               AVDD-supply = <&reg_3p3v>;
-               IOVDD-supply = <&reg_3p3v>;
+               HPVDD-supply = <&reg_gen_3p3>;
+               SPRVDD-supply = <&reg_gen_3p3>;
+               SPLVDD-supply = <&reg_gen_3p3>;
+               AVDD-supply = <&reg_gen_3p3>;
+               IOVDD-supply = <&reg_gen_3p3>;
                DVDD-supply = <&vgen4_reg>;
                reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>;
        };
index b2bcbf2..ca59d1f 100644 (file)
                };
        };
 
-       dmss: dmss {
+       dmss: bus@48000000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
                dma-ranges;
-               ranges;
+               ranges = <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>;
 
                ti,sci-dev-id = <25>;
 
                };
        };
 
-       dmsc: dmsc@44043000 {
+       dmsc: system-controller@44043000 {
                compatible = "ti,k2g-sci";
                ti,host-id = <12>;
                mbox-names = "rx", "tx";
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
                clocks = <&k3_clks 145 0>;
        };
 
-       main_gpio_intr: interrupt-controller0 {
+       main_gpio_intr: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x00a00000 0x00 0x800>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index 99e94de..deb19ae 100644 (file)
@@ -74,8 +74,9 @@
                clocks = <&k3_clks 148 0>;
        };
 
-       mcu_gpio_intr: interrupt-controller1 {
+       mcu_gpio_intr: interrupt-controller@4210000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x04210000 0x00 0x200>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index cb340d1..6cd3131 100644 (file)
                #phy-cells = <0>;
        };
 
-       intr_main_gpio: interrupt-controller0 {
+       intr_main_gpio: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x0 0x00a00000 0x0 0x400>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                ti,interrupt-ranges = <0 392 32>;
        };
 
-       main-navss {
+       main_navss: bus@30800000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x0 0x30800000 0x0 0x30800000 0x0 0xbc00000>;
                dma-coherent;
                dma-ranges;
 
                ti,sci-dev-id = <118>;
 
-               intr_main_navss: interrupt-controller1 {
+               intr_main_navss: interrupt-controller@310e0000 {
                        compatible = "ti,sci-intr";
+                       reg = <0x0 0x310e0000 0x0 0x2000>;
                        ti,intr-trigger-type = <4>;
                        interrupt-controller;
                        interrupt-parent = <&gic500>;
index 0388c02..f5b8ef2 100644 (file)
                };
        };
 
-       mcu-navss {
+       mcu_navss: bus@28380000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
                dma-coherent;
                dma-ranges;
 
index ed42f13..7cb864b 100644 (file)
@@ -6,24 +6,24 @@
  */
 
 &cbass_wakeup {
-       dmsc: dmsc {
+       dmsc: system-controller@44083000 {
                compatible = "ti,am654-sci";
                ti,host-id = <12>;
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
 
                mbox-names = "rx", "tx";
 
                mboxes= <&secure_proxy_main 11>,
                        <&secure_proxy_main 13>;
 
+               reg-names = "debug_messages";
+               reg = <0x44083000 0x1000>;
+
                k3_pds: power-controller {
                        compatible = "ti,sci-pm-domain";
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
@@ -69,8 +69,9 @@
                power-domains = <&k3_pds 115 TI_SCI_PD_EXCLUSIVE>;
        };
 
-       intr_wkup_gpio: interrupt-controller2 {
+       intr_wkup_gpio: interrupt-controller@42200000 {
                compatible = "ti,sci-intr";
+               reg = <0x42200000 0x200>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index 9e87fb3..eddb2ff 100644 (file)
                        gpios = <&wkup_gpio0 27 GPIO_ACTIVE_LOW>;
                };
        };
-
-       clk_ov5640_fixed: clock {
-               compatible = "fixed-clock";
-               #clock-cells = <0>;
-               clock-frequency = <24000000>;
-       };
 };
 
 &wkup_pmx0 {
        pinctrl-names = "default";
        pinctrl-0 = <&main_i2c1_pins_default>;
        clock-frequency = <400000>;
-
-       ov5640: camera@3c {
-               compatible = "ovti,ov5640";
-               reg = <0x3c>;
-
-               clocks = <&clk_ov5640_fixed>;
-               clock-names = "xclk";
-
-               port {
-                       csi2_cam0: endpoint {
-                               remote-endpoint = <&csi2_phy0>;
-                               clock-lanes = <0>;
-                               data-lanes = <1 2>;
-                       };
-               };
-       };
-
 };
 
 &main_i2c2 {
        };
 };
 
-&csi2_0 {
-       csi2_phy0: endpoint {
-               remote-endpoint = <&csi2_cam0>;
-               clock-lanes = <0>;
-               data-lanes = <1 2>;
-       };
-};
-
 &mcu_cpsw {
        pinctrl-names = "default";
        pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
index f86c493..19fea8a 100644 (file)
@@ -68,8 +68,9 @@
                };
        };
 
-       main_gpio_intr: interrupt-controller0 {
+       main_gpio_intr: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x00a00000 0x00 0x800>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                #size-cells = <2>;
                ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
                ti,sci-dev-id = <199>;
+               dma-coherent;
+               dma-ranges;
 
-               main_navss_intr: interrupt-controller1 {
+               main_navss_intr: interrupt-controller@310e0000 {
                        compatible = "ti,sci-intr";
+                       reg = <0x00 0x310e0000 0x00 0x4000>;
                        ti,intr-trigger-type = <4>;
                        interrupt-controller;
                        interrupt-parent = <&gic500>;
index 5e74e43..5663fe3 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 &cbass_mcu_wakeup {
-       dmsc: dmsc@44083000 {
+       dmsc: system-controller@44083000 {
                compatible = "ti,k2g-sci";
                ti,host-id = <12>;
 
@@ -23,7 +23,7 @@
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
@@ -96,8 +96,9 @@
                clock-names = "fclk";
        };
 
-       wkup_gpio_intr: interrupt-controller2 {
+       wkup_gpio_intr: interrupt-controller@42200000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x42200000 0x00 0x400>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index c2aa45a..3bcafe4 100644 (file)
@@ -76,8 +76,9 @@
                };
        };
 
-       main_gpio_intr: interrupt-controller0 {
+       main_gpio_intr: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x00a00000 0x00 0x800>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                ti,interrupt-ranges = <8 392 56>;
        };
 
-       main-navss {
+       main_navss: bus@30000000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
                dma-coherent;
                dma-ranges;
 
                ti,sci-dev-id = <199>;
 
-               main_navss_intr: interrupt-controller1 {
+               main_navss_intr: interrupt-controller@310e0000 {
                        compatible = "ti,sci-intr";
+                       reg = <0x0 0x310e0000 0x0 0x4000>;
                        ti,intr-trigger-type = <4>;
                        interrupt-controller;
                        interrupt-parent = <&gic500>;
index d56e347..5e825e4 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 &cbass_mcu_wakeup {
-       dmsc: dmsc@44083000 {
+       dmsc: system-controller@44083000 {
                compatible = "ti,k2g-sci";
                ti,host-id = <12>;
 
@@ -23,7 +23,7 @@
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
@@ -96,8 +96,9 @@
                clock-names = "fclk";
        };
 
-       wkup_gpio_intr: interrupt-controller2 {
+       wkup_gpio_intr: interrupt-controller@42200000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x42200000 0x00 0x400>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                };
        };
 
-       mcu-navss {
+       mcu_navss: bus@28380000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
                dma-coherent;
                dma-ranges;
 
index a7bf0c8..830ab91 100644 (file)
@@ -158,31 +158,29 @@ unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
 #define PM(p)  __pgprot(_page_cachable_default | (p))
-#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
 
 static inline void setup_protection_map(void)
 {
        protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[4]  = PVA(_PAGE_PRESENT);
-       protection_map[5]  = PVA(_PAGE_PRESENT);
-       protection_map[6]  = PVA(_PAGE_PRESENT);
-       protection_map[7]  = PVA(_PAGE_PRESENT);
+       protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+       protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[4]  = PM(_PAGE_PRESENT);
+       protection_map[5]  = PM(_PAGE_PRESENT);
+       protection_map[6]  = PM(_PAGE_PRESENT);
+       protection_map[7]  = PM(_PAGE_PRESENT);
 
        protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+       protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
                                _PAGE_NO_READ);
-       protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-       protection_map[12] = PVA(_PAGE_PRESENT);
-       protection_map[13] = PVA(_PAGE_PRESENT);
-       protection_map[14] = PVA(_PAGE_PRESENT);
-       protection_map[15] = PVA(_PAGE_PRESENT);
+       protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+       protection_map[12] = PM(_PAGE_PRESENT);
+       protection_map[13] = PM(_PAGE_PRESENT);
+       protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+       protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
 }
 
-#undef _PVA
 #undef PM
 
 void cpu_cache_init(void)
index 2d5c6be..93ce3ec 100644 (file)
@@ -50,7 +50,7 @@ l_yes:
 1098:  nop;                                    \
        .pushsection __jump_table, "aw";        \
        .long 1098b - ., LABEL - .;             \
-       FTR_ENTRY_LONG KEY;                     \
+       FTR_ENTRY_LONG KEY - .;                 \
        .popsection
 #endif
 
index 33fa5dd..714a35f 100644 (file)
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
        pgd_t *pgdir = init_mm.pgd;
        return __find_linux_pte(pgdir, ea, NULL, hshift);
 }
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+       pte_t *ptep;
+       phys_addr_t pa;
+       int hugepage_shift;
+
+       /*
+        * init_mm does not free page tables, and does not do THP. It may
+        * have huge pages from huge vmalloc / ioremap etc.
+        */
+       ptep = find_init_mm_pte(addr, &hugepage_shift);
+       if (WARN_ON(!ptep))
+               return 0;
+
+       pa = PFN_PHYS(pte_pfn(*ptep));
+
+       if (!hugepage_shift)
+               hugepage_shift = PAGE_SHIFT;
+
+       pa |= addr & ((1ul << hugepage_shift) - 1);
+
+       return pa;
+}
+
 /*
  * This is what we should always use. Any other lockless page table lookup needs
  * careful audit against THP split.
index f24cd53..3bbdcc8 100644 (file)
@@ -346,28 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
  */
 static inline unsigned long eeh_token_to_phys(unsigned long token)
 {
-       pte_t *ptep;
-       unsigned long pa;
-       int hugepage_shift;
-
-       /*
-        * We won't find hugepages here(this is iomem). Hence we are not
-        * worried about _PAGE_SPLITTING/collapse. Also we will not hit
-        * page table free, because of init_mm.
-        */
-       ptep = find_init_mm_pte(token, &hugepage_shift);
-       if (!ptep)
-               return token;
-
-       pa = pte_pfn(*ptep);
-
-       /* On radix we can do hugepage mappings for io, so handle that */
-       if (!hugepage_shift)
-               hugepage_shift = PAGE_SHIFT;
-
-       pa <<= PAGE_SHIFT;
-       pa |= token & ((1ul << hugepage_shift) - 1);
-       return pa;
+       return ppc_find_vmap_phys(token);
 }
 
 /*
index 51bbaae..c877f07 100644 (file)
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
 #ifdef CONFIG_PPC_INDIRECT_MMIO
 struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 {
-       unsigned hugepage_shift;
        struct iowa_bus *bus;
        int token;
 
@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
                bus = &iowa_busses[token - 1];
        else {
                unsigned long vaddr, paddr;
-               pte_t *ptep;
 
                vaddr = (unsigned long)PCI_FIX_ADDR(addr);
                if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
                        return NULL;
-               /*
-                * We won't find huge pages here (iomem). Also can't hit
-                * a page table free due to init_mm
-                */
-               ptep = find_init_mm_pte(vaddr, &hugepage_shift);
-               if (ptep == NULL)
-                       paddr = 0;
-               else {
-                       WARN_ON(hugepage_shift);
-                       paddr = pte_pfn(*ptep) << PAGE_SHIFT;
-               }
+
+               paddr = ppc_find_vmap_phys(vaddr);
+
                bus = iowa_pci_find(vaddr, paddr);
 
                if (bus == NULL)
index 57d6b85..2af89a5 100644 (file)
@@ -898,7 +898,6 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;
-       size_t size_io = size;
 
        size = PAGE_ALIGN(size);
        order = get_order(size);
@@ -925,9 +924,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        memset(ret, 0, size);
 
        /* Set up tces to cover the allocated range */
-       size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
-       nio_pages = size_io >> tbl->it_page_shift;
-       io_order = get_iommu_order(size_io, tbl);
+       nio_pages = size >> tbl->it_page_shift;
+       io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> tbl->it_page_shift, io_order, 0);
        if (mapping == DMA_MAPPING_ERROR) {
@@ -942,9 +940,10 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
 {
        if (tbl) {
-               size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
-               unsigned int nio_pages = size_io >> tbl->it_page_shift;
+               unsigned int nio_pages;
 
+               size = PAGE_ALIGN(size);
+               nio_pages = size >> tbl->it_page_shift;
                iommu_free(tbl, dma_handle, nio_pages);
                size = PAGE_ALIGN(size);
                free_pages((unsigned long)vaddr, get_order(size));
index 01ab216..e8c2a63 100644 (file)
@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p)
        int ret = 0;
        struct kprobe *prev;
        struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
-       struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
 
        if ((unsigned long)p->addr & 0x03) {
                printk("Attempt to register kprobe at an unaligned address\n");
@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p)
        } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
                printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
                ret = -EINVAL;
-       } else if (ppc_inst_prefixed(prefix)) {
+       } else if ((unsigned long)p->addr & ~PAGE_MASK &&
+                  ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
                printk("Cannot register a kprobe on the second word of prefixed instruction\n");
                ret = -EINVAL;
        }
index dca6648..f9e1f54 100644 (file)
@@ -902,6 +902,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
        unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
        user_write_access_end();
 
+       /* Save the siginfo outside of the unsafe block. */
+       if (copy_siginfo_to_user(&frame->info, &ksig->info))
+               goto badframe;
+
        /* Make sure signal handler doesn't get spurious FP exceptions */
        tsk->thread.fp_state.fpscr = 0;
 
@@ -915,11 +919,6 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
                regs->nip = (unsigned long) &frame->tramp[0];
        }
 
-
-       /* Save the siginfo outside of the unsafe block. */
-       if (copy_siginfo_to_user(&frame->info, &ksig->info))
-               goto badframe;
-
        /* Allocate a dummy caller frame for the signal handler. */
        newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
        err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
index 7360350..bc08136 100644 (file)
@@ -4455,7 +4455,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
                mtspr(SPRN_EBBRR, ebb_regs[1]);
                mtspr(SPRN_BESCR, ebb_regs[2]);
                mtspr(SPRN_TAR, user_tar);
-               mtspr(SPRN_FSCR, current->thread.fscr);
        }
        mtspr(SPRN_VRSAVE, user_vrsave);
 
index 7af7c70..7a0f124 100644 (file)
 #include <asm/pte-walk.h>
 
 /* Translate address of a vmalloc'd thing to a linear map address */
-static void *real_vmalloc_addr(void *x)
+static void *real_vmalloc_addr(void *addr)
 {
-       unsigned long addr = (unsigned long) x;
-       pte_t *p;
-       /*
-        * assume we don't have huge pages in vmalloc space...
-        * So don't worry about THP collapse/split. Called
-        * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
-        */
-       p = find_init_mm_pte(addr, NULL);
-       if (!p || !pte_present(*p))
-               return NULL;
-       addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
-       return __va(addr);
+       return __va(ppc_find_vmap_phys((unsigned long)addr));
 }
 
 /* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
index 5e634db..004f0d4 100644 (file)
@@ -59,6 +59,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define STACK_SLOT_UAMOR       (SFS-88)
 #define STACK_SLOT_DAWR1       (SFS-96)
 #define STACK_SLOT_DAWRX1      (SFS-104)
+#define STACK_SLOT_FSCR                (SFS-112)
 /* the following is used by the P9 short path */
 #define STACK_SLOT_NVGPRS      (SFS-152)       /* 18 gprs */
 
@@ -686,6 +687,8 @@ BEGIN_FTR_SECTION
        std     r6, STACK_SLOT_DAWR0(r1)
        std     r7, STACK_SLOT_DAWRX0(r1)
        std     r8, STACK_SLOT_IAMR(r1)
+       mfspr   r5, SPRN_FSCR
+       std     r5, STACK_SLOT_FSCR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
        mfspr   r6, SPRN_DAWR1
@@ -1663,6 +1666,10 @@ FTR_SECTION_ELSE
        ld      r7, STACK_SLOT_HFSCR(r1)
        mtspr   SPRN_HFSCR, r7
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+       ld      r5, STACK_SLOT_FSCR(r1)
+       mtspr   SPRN_FSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        /*
         * Restore various registers to 0, where non-zero values
         * set by the guest could disrupt the host.
index 043bbea..a6b36a4 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include <asm/kasan.h>
+#include <asm/sparsemem.h>
 #include <asm/svm.h>
 
 #include <mm/mmu_decl.h>
index 16d4d1b..5162241 100644 (file)
@@ -2254,7 +2254,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
        bool use_siar = regs_use_siar(regs);
        unsigned long siar = mfspr(SPRN_SIAR);
 
-       if (ppmu->flags & PPMU_P10_DD1) {
+       if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
                if (siar)
                        return siar;
                else
index c5914e7..18ec0f9 100644 (file)
@@ -61,11 +61,11 @@ config RISCV
        select GENERIC_TIME_VSYSCALL if MMU && 64BIT
        select HANDLE_DOMAIN_IRQ
        select HAVE_ARCH_AUDITSYSCALL
-       select HAVE_ARCH_JUMP_LABEL
-       select HAVE_ARCH_JUMP_LABEL_RELATIVE
+       select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+       select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
        select HAVE_ARCH_KASAN if MMU && 64BIT
        select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
-       select HAVE_ARCH_KGDB
+       select HAVE_ARCH_KGDB if !XIP_KERNEL
        select HAVE_ARCH_KGDB_QXFER_PKT
        select HAVE_ARCH_MMAP_RND_BITS if MMU
        select HAVE_ARCH_SECCOMP_FILTER
@@ -80,9 +80,9 @@ config RISCV
        select HAVE_GCC_PLUGINS
        select HAVE_GENERIC_VDSO if MMU && 64BIT
        select HAVE_IRQ_TIME_ACCOUNTING
-       select HAVE_KPROBES
-       select HAVE_KPROBES_ON_FTRACE
-       select HAVE_KRETPROBES
+       select HAVE_KPROBES if !XIP_KERNEL
+       select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
+       select HAVE_KRETPROBES if !XIP_KERNEL
        select HAVE_PCI
        select HAVE_PERF_EVENTS
        select HAVE_PERF_REGS
@@ -231,11 +231,11 @@ config ARCH_RV64I
        bool "RV64I"
        select 64BIT
        select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
-       select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
+       select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
-       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
        select HAVE_FUNCTION_GRAPH_TRACER
-       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_TRACER if !XIP_KERNEL
        select SWIOTLB if MMU
 
 endchoice
index ed96376..30676eb 100644 (file)
@@ -14,6 +14,7 @@ config SOC_SIFIVE
        select CLK_SIFIVE
        select CLK_SIFIVE_PRCI
        select SIFIVE_PLIC
+       select RISCV_ERRATA_ALTERNATIVE
        select ERRATA_SIFIVE
        help
          This enables support for SiFive SoC platform hardware.
index 3eb9590..99ecd8b 100644 (file)
@@ -16,7 +16,7 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
        CC_FLAGS_FTRACE := -fpatchable-function-entry=8
 endif
 
-ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
+ifeq ($(CONFIG_CMODEL_MEDLOW),y)
 KBUILD_CFLAGS_MODULE += -mcmodel=medany
 endif
 
@@ -38,6 +38,15 @@ else
        KBUILD_LDFLAGS += -melf32lriscv
 endif
 
+ifeq ($(CONFIG_LD_IS_LLD),y)
+       KBUILD_CFLAGS += -mno-relax
+       KBUILD_AFLAGS += -mno-relax
+ifneq ($(LLVM_IAS),1)
+       KBUILD_CFLAGS += -Wa,-mno-relax
+       KBUILD_AFLAGS += -Wa,-mno-relax
+endif
+endif
+
 # ISA string setting
 riscv-march-$(CONFIG_ARCH_RV32I)       := rv32ima
 riscv-march-$(CONFIG_ARCH_RV64I)       := rv64ima
index 622b127..855c150 100644 (file)
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
 dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
index 74c47fe..d90e4eb 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb \
                            hifive-unmatched-a00.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
index 8eef82e..abbb960 100644 (file)
                        cache-size = <2097152>;
                        cache-unified;
                        interrupt-parent = <&plic0>;
-                       interrupts = <19 20 21 22>;
+                       interrupts = <19 21 22 20>;
                        reg = <0x0 0x2010000 0x0 0x1000>;
                };
                gpio: gpio@10060000 {
index bdd5fc8..2fde48d 100644 (file)
@@ -1,2 +1,2 @@
-obj-y += errata_cip_453.o
+obj-$(CONFIG_ERRATA_SIFIVE_CIP_453) += errata_cip_453.o
 obj-y += errata.o
index 88c0870..67406c3 100644 (file)
@@ -51,7 +51,7 @@
        REG_ASM " " newlen "\n" \
        ".word " errata_id "\n"
 
-#define ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) \
+#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
        ".if " __stringify(enable) " == 1\n"                            \
        ".pushsection .alternative, \"a\"\n"                            \
        ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
@@ -69,7 +69,7 @@
        "886 :\n"       \
        old_c "\n"      \
        "887 :\n"       \
-       ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c)
+       ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
 
 #define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
        __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
index 9469f46..380cd3a 100644 (file)
@@ -30,9 +30,8 @@
 
 #define BPF_JIT_REGION_SIZE    (SZ_128M)
 #ifdef CONFIG_64BIT
-/* KASLR should leave at least 128MB for BPF after the kernel */
-#define BPF_JIT_REGION_START   PFN_ALIGN((unsigned long)&_end)
-#define BPF_JIT_REGION_END     (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_START   (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END     (MODULES_END)
 #else
 #define BPF_JIT_REGION_START   (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
 #define BPF_JIT_REGION_END     (VMALLOC_END)
index 03901d3..9a1b7a0 100644 (file)
@@ -231,13 +231,13 @@ static void __init init_resources(void)
 
        /* Clean-up any unused pre-allocated resources */
        mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
-       memblock_free((phys_addr_t) mem_res, mem_res_sz);
+       memblock_free(__pa(mem_res), mem_res_sz);
        return;
 
  error:
        /* Better an empty resource tree than an inconsistent one */
        release_child_resources(&iomem_resource);
-       memblock_free((phys_addr_t) mem_res, mem_res_sz);
+       memblock_free(__pa(mem_res), mem_res_sz);
 }
 
 
index 0721b97..7bc88d8 100644 (file)
@@ -86,8 +86,13 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
        }
 }
 
+#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#define __trap_section         __section(".xip.traps")
+#else
+#define __trap_section
+#endif
 #define DO_ERROR_INFO(name, signo, code, str)                          \
-asmlinkage __visible void name(struct pt_regs *regs)                   \
+asmlinkage __visible __trap_section void name(struct pt_regs *regs)    \
 {                                                                      \
        do_trap_error(regs, signo, code, regs->epc, "Oops - " str);     \
 }
@@ -111,7 +116,7 @@ DO_ERROR_INFO(do_trap_store_misaligned,
 int handle_misaligned_load(struct pt_regs *regs);
 int handle_misaligned_store(struct pt_regs *regs);
 
-asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
 {
        if (!handle_misaligned_load(regs))
                return;
@@ -119,7 +124,7 @@ asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
                      "Oops - load address misaligned");
 }
 
-asmlinkage void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
 {
        if (!handle_misaligned_store(regs))
                return;
@@ -146,7 +151,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
        return GET_INSN_LENGTH(insn);
 }
 
-asmlinkage __visible void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
 {
 #ifdef CONFIG_KPROBES
        if (kprobe_single_step_handler(regs))
index 4b29b99..a3ff09c 100644 (file)
@@ -99,9 +99,22 @@ SECTIONS
        }
        PERCPU_SECTION(L1_CACHE_BYTES)
 
-       . = ALIGN(PAGE_SIZE);
+       . = ALIGN(8);
+       .alternative : {
+               __alt_start = .;
+               *(.alternative)
+               __alt_end = .;
+       }
        __init_end = .;
 
+       . = ALIGN(16);
+       .xip.traps : {
+               __xip_traps_start = .;
+               *(.xip.traps)
+               __xip_traps_end = .;
+       }
+
+       . = ALIGN(PAGE_SIZE);
        .sdata : {
                __global_pointer$ = . + 0x800;
                *(.sdata*)
index 4faf8bd..4c4c92c 100644 (file)
@@ -746,14 +746,18 @@ void __init protect_kernel_text_data(void)
        unsigned long init_data_start = (unsigned long)__init_data_begin;
        unsigned long rodata_start = (unsigned long)__start_rodata;
        unsigned long data_start = (unsigned long)_data;
-       unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
+       unsigned long end_va = kernel_virt_addr + load_sz;
+#else
+       unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#endif
 
        set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
        set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
        set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
        /* rodata section is marked readonly in mark_rodata_ro */
        set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-       set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+       set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
 }
 
 void mark_rodata_ro(void)
index 9daacae..d7189c8 100644 (file)
@@ -169,7 +169,7 @@ static void __init kasan_shallow_populate(void *start, void *end)
 
 void __init kasan_init(void)
 {
-       phys_addr_t _start, _end;
+       phys_addr_t p_start, p_end;
        u64 i;
 
        /*
@@ -189,9 +189,9 @@ void __init kasan_init(void)
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
 
        /* Populate the linear mapping */
-       for_each_mem_range(i, &_start, &_end) {
-               void *start = (void *)__va(_start);
-               void *end = (void *)__va(_end);
+       for_each_mem_range(i, &p_start, &p_end) {
+               void *start = (void *)__va(p_start);
+               void *end = (void *)__va(p_end);
 
                if (start >= end)
                        break;
@@ -201,7 +201,7 @@ void __init kasan_init(void)
 
        /* Populate kernel, BPF, modules mapping */
        kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
-                      kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END));
+                      kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
 
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
index 12de7a9..9cc71ca 100644 (file)
@@ -651,9 +651,9 @@ ENDPROC(stack_overflow)
 .Lcleanup_sie_mcck:
        larl    %r13,.Lsie_entry
        slgr    %r9,%r13
-       larl    %r13,.Lsie_skip
+       lghi    %r13,.Lsie_skip - .Lsie_entry
        clgr    %r9,%r13
-       jh      .Lcleanup_sie_int
+       jhe     .Lcleanup_sie_int
        oi      __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
 .Lcleanup_sie_int:
        BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
index 3075294..cb5e8d3 100644 (file)
@@ -200,8 +200,9 @@ endif
 KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
 
 ifdef CONFIG_LTO_CLANG
-KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-                  -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
 endif
 
 ifdef CONFIG_X86_NEED_RELOCS
index 63f0972..3a75a2c 100644 (file)
@@ -1406,6 +1406,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
                                                die_id = i;
                                        else
                                                die_id = topology_phys_to_logical_pkg(i);
+                                       if (die_id < 0)
+                                               die_id = -ENODEV;
                                        map->pbus_to_dieid[bus] = die_id;
                                        break;
                                }
@@ -1452,14 +1454,14 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
                        i = -1;
                        if (reverse) {
                                for (bus = 255; bus >= 0; bus--) {
-                                       if (map->pbus_to_dieid[bus] >= 0)
+                                       if (map->pbus_to_dieid[bus] != -1)
                                                i = map->pbus_to_dieid[bus];
                                        else
                                                map->pbus_to_dieid[bus] = i;
                                }
                        } else {
                                for (bus = 0; bus <= 255; bus++) {
-                                       if (map->pbus_to_dieid[bus] >= 0)
+                                       if (map->pbus_to_dieid[bus] != -1)
                                                i = map->pbus_to_dieid[bus];
                                        else
                                                map->pbus_to_dieid[bus] = i;
@@ -5097,9 +5099,10 @@ static struct intel_uncore_type icx_uncore_m2m = {
        .perf_ctr       = SNR_M2M_PCI_PMON_CTR0,
        .event_ctl      = SNR_M2M_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
+       .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
        .box_ctl        = SNR_M2M_PCI_PMON_BOX_CTL,
        .ops            = &snr_m2m_uncore_pci_ops,
-       .format_group   = &skx_uncore_format_group,
+       .format_group   = &snr_m2m_uncore_format_group,
 };
 
 static struct attribute *icx_upi_uncore_formats_attr[] = {
index 412b51e..48067af 100644 (file)
@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
 extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
 extern void lapic_assign_system_vectors(void);
 extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
+extern void lapic_update_legacy_vectors(void);
 extern void lapic_online(void);
 extern void lapic_offline(void);
 extern bool apic_needs_pit(void);
index b7dd944..8f28faf 100644 (file)
 # define DISABLE_PTI           (1 << (X86_FEATURE_PTI & 31))
 #endif
 
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD        0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD         (1 << (X86_FEATURE_ENQCMD & 31))
 
 #ifdef CONFIG_X86_SGX
 # define DISABLE_SGX   0
index ed33a14..23bef08 100644 (file)
@@ -106,10 +106,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
  */
 #define PASID_DISABLED 0
 
-#ifdef CONFIG_IOMMU_SUPPORT
-/* Update current's PASID MSR/state by mm's PASID. */
-void update_pasid(void);
-#else
 static inline void update_pasid(void) { }
-#endif
+
 #endif /* _ASM_X86_FPU_API_H */
index 8d33ad8..fdee23e 100644 (file)
@@ -578,19 +578,19 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
         * PKRU state is switched eagerly because it needs to be valid before we
         * return to userland e.g. for a copy_to_user() operation.
         */
-       if (current->mm) {
+       if (!(current->flags & PF_KTHREAD)) {
+               /*
+                * If the PKRU bit in xsave.header.xfeatures is not set,
+                * then the PKRU component was in init state, which means
+                * XRSTOR will set PKRU to 0. If the bit is not set then
+                * get_xsave_addr() will return NULL because the PKRU value
+                * in memory is not valid. This means pkru_val has to be
+                * set to 0 and not to init_pkru_value.
+                */
                pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
-               if (pk)
-                       pkru_val = pk->pkru;
+               pkru_val = pk ? pk->pkru : 0;
        }
        __write_pkru(pkru_val);
-
-       /*
-        * Expensive PASID MSR write will be avoided in update_pasid() because
-        * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
-        * unless it's different from mm->pasid to reduce overhead.
-        */
-       update_pasid();
 }
 
 #endif /* _ASM_X86_FPU_INTERNAL_H */
index ddbdefd..91a7b66 100644 (file)
@@ -3,11 +3,13 @@
 #define _ASM_X86_THERMAL_H
 
 #ifdef CONFIG_X86_THERMAL_VECTOR
+void therm_lvt_init(void);
 void intel_init_thermal(struct cpuinfo_x86 *c);
 bool x86_thermal_enabled(void);
 void intel_thermal_interrupt(void);
 #else
-static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
+static inline void therm_lvt_init(void)                                { }
+static inline void intel_init_thermal(struct cpuinfo_x86 *c)   { }
 #endif
 
 #endif /* _ASM_X86_THERMAL_H */
index 6974b51..6fe5b44 100644 (file)
@@ -182,42 +182,70 @@ done:
                n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
 }
 
+/*
+ * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
+ *
+ * @instr: instruction byte stream
+ * @instrlen: length of the above
+ * @off: offset within @instr where the first NOP has been detected
+ *
+ * Return: number of NOPs found (and replaced).
+ */
+static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
+{
+       unsigned long flags;
+       int i = off, nnops;
+
+       while (i < instrlen) {
+               if (instr[i] != 0x90)
+                       break;
+
+               i++;
+       }
+
+       nnops = i - off;
+
+       if (nnops <= 1)
+               return nnops;
+
+       local_irq_save(flags);
+       add_nops(instr + off, nnops);
+       local_irq_restore(flags);
+
+       DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
+
+       return nnops;
+}
+
 /*
  * "noinline" to cause control flow change and thus invalidate I$ and
  * cause refetch after modification.
  */
 static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 {
-       unsigned long flags;
        struct insn insn;
-       int nop, i = 0;
+       int i = 0;
 
        /*
-        * Jump over the non-NOP insns, the remaining bytes must be single-byte
-        * NOPs, optimize them.
+        * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
+        * ones.
         */
        for (;;) {
                if (insn_decode_kernel(&insn, &instr[i]))
                        return;
 
+               /*
+                * See if this and any potentially following NOPs can be
+                * optimized.
+                */
                if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
-                       break;
-
-               if ((i += insn.length) >= a->instrlen)
-                       return;
-       }
+                       i += optimize_nops_range(instr, a->instrlen, i);
+               else
+                       i += insn.length;
 
-       for (nop = i; i < a->instrlen; i++) {
-               if (WARN_ONCE(instr[i] != 0x90, "Not a NOP at 0x%px\n", &instr[i]))
+               if (i >= a->instrlen)
                        return;
        }
-
-       local_irq_save(flags);
-       add_nops(instr + nop, i - nop);
-       local_irq_restore(flags);
-
-       DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
-                  instr, nop, a->instrlen);
 }
 
 /*
index 4a39fb4..d262811 100644 (file)
@@ -2604,6 +2604,7 @@ static void __init apic_bsp_setup(bool upmode)
        end_local_APIC_setup();
        irq_remap_enable_fault_handling();
        setup_IO_APIC();
+       lapic_update_legacy_vectors();
 }
 
 #ifdef CONFIG_UP_LATE_INIT
index 6dbdc7c..fb67ed5 100644 (file)
@@ -738,6 +738,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
        irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
 }
 
+void __init lapic_update_legacy_vectors(void)
+{
+       unsigned int i;
+
+       if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
+               return;
+
+       /*
+        * If the IO/APIC is disabled via config, kernel command line or
+        * lack of enumeration then all legacy interrupts are routed
+        * through the PIC. Make sure that they are marked as legacy
+        * vectors. PIC_CASCADE_IRQ has already been marked in
+        * lapic_assign_system_vectors().
+        */
+       for (i = 0; i < nr_legacy_irqs(); i++) {
+               if (i != PIC_CASCADE_IR)
+                       lapic_assign_legacy_vector(i, true);
+       }
+}
+
 void __init lapic_assign_system_vectors(void)
 {
        unsigned int i, vector = 0;
index 3ef5868..7aecb2f 100644 (file)
@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
                case 15:
                        return msr - MSR_P4_BPU_PERFCTR0;
                }
-               fallthrough;
+               break;
        case X86_VENDOR_ZHAOXIN:
        case X86_VENDOR_CENTAUR:
                return msr - MSR_ARCH_PERFMON_PERFCTR0;
@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
                case 15:
                        return msr - MSR_P4_BSU_ESCR0;
                }
-               fallthrough;
+               break;
        case X86_VENDOR_ZHAOXIN:
        case X86_VENDOR_CENTAUR:
                return msr - MSR_ARCH_PERFMON_EVENTSEL0;
index 6ad165a..64511c4 100644 (file)
@@ -212,6 +212,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
                list_splice_tail(&secs_pages, &zombie_secs_pages);
        mutex_unlock(&zombie_secs_pages_lock);
 
+       xa_destroy(&vepc->page_array);
        kfree(vepc);
 
        return 0;
index a4ec653..ec3ae30 100644 (file)
@@ -307,13 +307,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                return 0;
        }
 
-       if (!access_ok(buf, size))
-               return -EACCES;
+       if (!access_ok(buf, size)) {
+               ret = -EACCES;
+               goto out;
+       }
 
-       if (!static_cpu_has(X86_FEATURE_FPU))
-               return fpregs_soft_set(current, NULL,
-                                      0, sizeof(struct user_i387_ia32_struct),
-                                      NULL, buf) != 0;
+       if (!static_cpu_has(X86_FEATURE_FPU)) {
+               ret = fpregs_soft_set(current, NULL, 0,
+                                     sizeof(struct user_i387_ia32_struct),
+                                     NULL, buf);
+               goto out;
+       }
 
        if (use_xsave()) {
                struct _fpx_sw_bytes fx_sw_user;
@@ -369,6 +373,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                        fpregs_unlock();
                        return 0;
                }
+
+               /*
+                * The above did an FPU restore operation, restricted to
+                * the user portion of the registers, and failed, but the
+                * microcode might have modified the FPU registers
+                * nevertheless.
+                *
+                * If the FPU registers do not belong to current, then
+                * invalidate the FPU register state otherwise the task might
+                * preempt current and return to user space with corrupted
+                * FPU registers.
+                *
+                * In case current owns the FPU registers then no further
+                * action is required. The fixup below will handle it
+                * correctly.
+                */
+               if (test_thread_flag(TIF_NEED_FPU_LOAD))
+                       __cpu_invalidate_fpregs_state();
+
                fpregs_unlock();
        } else {
                /*
@@ -377,7 +400,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                 */
                ret = __copy_from_user(&env, buf, sizeof(env));
                if (ret)
-                       goto err_out;
+                       goto out;
                envp = &env;
        }
 
@@ -405,16 +428,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
        if (use_xsave() && !fx_only) {
                u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
 
-               if (using_compacted_format()) {
-                       ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
-               } else {
-                       ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
-
-                       if (!ret && state_size > offsetof(struct xregs_state, header))
-                               ret = validate_user_xstate_header(&fpu->state.xsave.header);
-               }
+               ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
                if (ret)
-                       goto err_out;
+                       goto out;
 
                sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
                                              fx_only);
@@ -434,7 +450,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
                if (ret) {
                        ret = -EFAULT;
-                       goto err_out;
+                       goto out;
                }
 
                sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
@@ -452,7 +468,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
        } else {
                ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
                if (ret)
-                       goto err_out;
+                       goto out;
 
                fpregs_lock();
                ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
@@ -463,7 +479,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                fpregs_deactivate(fpu);
        fpregs_unlock();
 
-err_out:
+out:
        if (ret)
                fpu__clear_user_states(fpu);
        return ret;
index a85c640..d0eef96 100644 (file)
@@ -1402,60 +1402,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
        return 0;
 }
 #endif /* CONFIG_PROC_PID_ARCH_STATUS */
-
-#ifdef CONFIG_IOMMU_SUPPORT
-void update_pasid(void)
-{
-       u64 pasid_state;
-       u32 pasid;
-
-       if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
-               return;
-
-       if (!current->mm)
-               return;
-
-       pasid = READ_ONCE(current->mm->pasid);
-       /* Set the valid bit in the PASID MSR/state only for valid pasid. */
-       pasid_state = pasid == PASID_DISABLED ?
-                     pasid : pasid | MSR_IA32_PASID_VALID;
-
-       /*
-        * No need to hold fregs_lock() since the task's fpstate won't
-        * be changed by others (e.g. ptrace) while the task is being
-        * switched to or is in IPI.
-        */
-       if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-               /* The MSR is active and can be directly updated. */
-               wrmsrl(MSR_IA32_PASID, pasid_state);
-       } else {
-               struct fpu *fpu = &current->thread.fpu;
-               struct ia32_pasid_state *ppasid_state;
-               struct xregs_state *xsave;
-
-               /*
-                * The CPU's xstate registers are not currently active. Just
-                * update the PASID state in the memory buffer here. The
-                * PASID MSR will be loaded when returning to user mode.
-                */
-               xsave = &fpu->state.xsave;
-               xsave->header.xfeatures |= XFEATURE_MASK_PASID;
-               ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
-               /*
-                * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
-                * won't be NULL and no need to check its value.
-                *
-                * Only update the task's PASID state when it's different
-                * from the mm's pasid.
-                */
-               if (ppasid_state->pasid != pasid_state) {
-                       /*
-                        * Invalid fpregs so that state restoring will pick up
-                        * the PASID state.
-                        */
-                       __fpu_invalidate_fpregs_state(fpu);
-                       ppasid_state->pasid = pasid_state;
-               }
-       }
-}
-#endif /* CONFIG_IOMMU_SUPPORT */
index 72920af..1e72062 100644 (file)
@@ -44,6 +44,7 @@
 #include <asm/pci-direct.h>
 #include <asm/prom.h>
 #include <asm/proto.h>
+#include <asm/thermal.h>
 #include <asm/unwind.h>
 #include <asm/vsyscall.h>
 #include <linux/vmalloc.h>
@@ -637,11 +638,11 @@ static void __init trim_snb_memory(void)
         * them from accessing certain memory ranges, namely anything below
         * 1M and in the pages listed in bad_pages[] above.
         *
-        * To avoid these pages being ever accessed by SNB gfx devices
-        * reserve all memory below the 1 MB mark and bad_pages that have
-        * not already been reserved at boot time.
+        * To avoid these pages being ever accessed by SNB gfx devices reserve
+        * bad_pages that have not already been reserved at boot time.
+        * All memory below the 1 MB mark is anyway reserved later during
+        * setup_arch(), so there is no need to reserve it here.
         */
-       memblock_reserve(0, 1<<20);
 
        for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
                if (memblock_reserve(bad_pages[i], PAGE_SIZE))
@@ -733,14 +734,14 @@ static void __init early_reserve_memory(void)
         * The first 4Kb of memory is a BIOS owned area, but generally it is
         * not listed as such in the E820 table.
         *
-        * Reserve the first memory page and typically some additional
-        * memory (64KiB by default) since some BIOSes are known to corrupt
-        * low memory. See the Kconfig help text for X86_RESERVE_LOW.
+        * Reserve the first 64K of memory since some BIOSes are known to
+        * corrupt low memory. After the real mode trampoline is allocated the
+        * rest of the memory below 640k is reserved.
         *
         * In addition, make sure page 0 is always reserved because on
         * systems with L1TF its contents can be leaked to user processes.
         */
-       memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
+       memblock_reserve(0, SZ_64K);
 
        early_reserve_initrd();
 
@@ -751,6 +752,7 @@ static void __init early_reserve_memory(void)
 
        reserve_ibft_region();
        reserve_bios_regions();
+       trim_snb_memory();
 }
 
 /*
@@ -1081,14 +1083,20 @@ void __init setup_arch(char **cmdline_p)
                        (max_pfn_mapped<<PAGE_SHIFT) - 1);
 #endif
 
-       reserve_real_mode();
-
        /*
-        * Reserving memory causing GPU hangs on Sandy Bridge integrated
-        * graphics devices should be done after we allocated memory under
-        * 1M for the real mode trampoline.
+        * Find free memory for the real mode trampoline and place it
+        * there.
+        * If there is not enough free memory under 1M, on EFI-enabled
+        * systems there will be an additional attempt to reclaim the memory
+        * for the real mode trampoline at efi_free_boot_services().
+        *
+        * Unconditionally reserve the entire first 1M of RAM because
+        * BIOSes are known to corrupt low memory and several
+        * hundred kilobytes are not worth complex detection what memory gets
+        * clobbered. Moreover, on machines with SandyBridge graphics or in
+        * setups that use crashkernel the entire 1M is reserved anyway.
         */
-       trim_snb_memory();
+       reserve_real_mode();
 
        init_mem_mapping();
 
@@ -1226,6 +1234,14 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.timers.wallclock_init();
 
+       /*
+        * This needs to run before setup_local_APIC() which soft-disables the
+        * local APIC temporarily and that masks the thermal LVT interrupt,
+        * leading to softlockups on machines which have configured SMI
+        * interrupt delivery.
+        */
+       therm_lvt_init();
+
        mcheck_init();
 
        register_refined_jiffies(CLOCK_TICK_RATE);
index 9a48f13..b4da665 100644 (file)
@@ -655,6 +655,7 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
                if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
                        entry->ecx = F(RDPID);
                ++array->nent;
+               break;
        default:
                break;
        }
index 8120e86..17fa4ab 100644 (file)
@@ -1410,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
        if (!apic_x2apic_mode(apic))
                valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
 
+       if (alignment + len > 4)
+               return 1;
+
        if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
                return 1;
 
@@ -1494,6 +1497,15 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
 
 static void cancel_hv_timer(struct kvm_lapic *apic);
 
+static void cancel_apic_timer(struct kvm_lapic *apic)
+{
+       hrtimer_cancel(&apic->lapic_timer.timer);
+       preempt_disable();
+       if (apic->lapic_timer.hv_timer_in_use)
+               cancel_hv_timer(apic);
+       preempt_enable();
+}
+
 static void apic_update_lvtt(struct kvm_lapic *apic)
 {
        u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
@@ -1502,11 +1514,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
        if (apic->lapic_timer.timer_mode != timer_mode) {
                if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
                                APIC_LVT_TIMER_TSCDEADLINE)) {
-                       hrtimer_cancel(&apic->lapic_timer.timer);
-                       preempt_disable();
-                       if (apic->lapic_timer.hv_timer_in_use)
-                               cancel_hv_timer(apic);
-                       preempt_enable();
+                       cancel_apic_timer(apic);
                        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
                        apic->lapic_timer.period = 0;
                        apic->lapic_timer.tscdeadline = 0;
@@ -2092,7 +2100,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
                if (apic_lvtt_tscdeadline(apic))
                        break;
 
-               hrtimer_cancel(&apic->lapic_timer.timer);
+               cancel_apic_timer(apic);
                kvm_lapic_set_reg(apic, APIC_TMICT, val);
                start_apic_timer(apic);
                break;
index 0144c40..8d5876d 100644 (file)
@@ -4739,9 +4739,33 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
        context->inject_page_fault = kvm_inject_page_fault;
 }
 
+static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
+{
+       union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
+
+       /*
+        * Nested MMUs are used only for walking L2's gva->gpa, they never have
+        * shadow pages of their own and so "direct" has no meaning.   Set it
+        * to "true" to try to detect bogus usage of the nested MMU.
+        */
+       role.base.direct = true;
+
+       if (!is_paging(vcpu))
+               role.base.level = 0;
+       else if (is_long_mode(vcpu))
+               role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
+                                                      PT64_ROOT_4LEVEL;
+       else if (is_pae(vcpu))
+               role.base.level = PT32E_ROOT_LEVEL;
+       else
+               role.base.level = PT32_ROOT_LEVEL;
+
+       return role;
+}
+
 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 {
-       union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
+       union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
        struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
        if (new_role.as_u64 == g_context->mmu_role.as_u64)
index 70b7e44..823a591 100644 (file)
@@ -90,8 +90,8 @@ struct guest_walker {
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
        bool pte_writable[PT_MAX_FULL_LEVELS];
-       unsigned pt_access;
-       unsigned pte_access;
+       unsigned int pt_access[PT_MAX_FULL_LEVELS];
+       unsigned int pte_access;
        gfn_t gfn;
        struct x86_exception fault;
 };
@@ -418,13 +418,15 @@ retry_walk:
                }
 
                walker->ptes[walker->level - 1] = pte;
+
+               /* Convert to ACC_*_MASK flags for struct guest_walker.  */
+               walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
        } while (!is_last_gpte(mmu, walker->level, pte));
 
        pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
        accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
 
        /* Convert to ACC_*_MASK flags for struct guest_walker.  */
-       walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
        walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
        errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
        if (unlikely(errcode))
@@ -463,7 +465,8 @@ retry_walk:
        }
 
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
-                __func__, (u64)pte, walker->pte_access, walker->pt_access);
+                __func__, (u64)pte, walker->pte_access,
+                walker->pt_access[walker->level - 1]);
        return 1;
 
 error:
@@ -643,7 +646,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
        bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
        struct kvm_mmu_page *sp = NULL;
        struct kvm_shadow_walk_iterator it;
-       unsigned direct_access, access = gw->pt_access;
+       unsigned int direct_access, access;
        int top_level, level, req_level, ret;
        gfn_t base_gfn = gw->gfn;
 
@@ -675,6 +678,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
                sp = NULL;
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
+                       access = gw->pt_access[it.level - 2];
                        sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
                                              false, access);
                }
index 0e62e6a..5e7e920 100644 (file)
@@ -221,7 +221,7 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
        return &avic_physical_id_table[index];
 }
 
-/**
+/*
  * Note:
  * AVIC hardware walks the nested page table to check permissions,
  * but does not use the SPA address specified in the leaf page
@@ -764,7 +764,7 @@ out:
        return ret;
 }
 
-/**
+/*
  * Note:
  * The HW cannot support posting multicast/broadcast
  * interrupts to a vCPU. So, we still use legacy interrupt
@@ -1005,7 +1005,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
 }
 
-/**
+/*
  * This function is called during VCPU halt/unhalt.
  */
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
index 5bc887e..8d36f0c 100644 (file)
@@ -199,9 +199,19 @@ static void sev_asid_free(struct kvm_sev_info *sev)
        sev->misc_cg = NULL;
 }
 
-static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+static void sev_decommission(unsigned int handle)
 {
        struct sev_data_decommission decommission;
+
+       if (!handle)
+               return;
+
+       decommission.handle = handle;
+       sev_guest_decommission(&decommission, NULL);
+}
+
+static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+{
        struct sev_data_deactivate deactivate;
 
        if (!handle)
@@ -214,9 +224,7 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
        sev_guest_deactivate(&deactivate, NULL);
        up_read(&sev_deactivate_lock);
 
-       /* decommission handle */
-       decommission.handle = handle;
-       sev_guest_decommission(&decommission, NULL);
+       sev_decommission(handle);
 }
 
 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
@@ -341,8 +349,10 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
        /* Bind ASID to this guest */
        ret = sev_bind_asid(kvm, start.handle, error);
-       if (ret)
+       if (ret) {
+               sev_decommission(start.handle);
                goto e_free_session;
+       }
 
        /* return handle to userspace */
        params.handle = start.handle;
@@ -1103,10 +1113,9 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
        struct sev_data_send_start data;
        int ret;
 
+       memset(&data, 0, sizeof(data));
        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
-       if (ret < 0)
-               return ret;
 
        params->session_len = data.session_len;
        if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
@@ -1215,10 +1224,9 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
        struct sev_data_send_update_data data;
        int ret;
 
+       memset(&data, 0, sizeof(data));
        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
-       if (ret < 0)
-               return ret;
 
        params->hdr_len = data.hdr_len;
        params->trans_len = data.trans_len;
index a61c015..4f83914 100644 (file)
@@ -1550,16 +1550,16 @@ TRACE_EVENT(kvm_nested_vmenter_failed,
        TP_ARGS(msg, err),
 
        TP_STRUCT__entry(
-               __field(const char *, msg)
+               __string(msg, msg)
                __field(u32, err)
        ),
 
        TP_fast_assign(
-               __entry->msg = msg;
+               __assign_str(msg, msg);
                __entry->err = err;
        ),
 
-       TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
+       TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
                __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
 );
 
index 50b42d7..c2a779b 100644 (file)
@@ -6247,6 +6247,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
        switch (kvm_get_apic_mode(vcpu)) {
        case LAPIC_MODE_INVALID:
                WARN_ONCE(true, "Invalid local APIC state");
+               break;
        case LAPIC_MODE_DISABLED:
                break;
        case LAPIC_MODE_XAPIC:
index b594275..e0f4a46 100644 (file)
@@ -3072,6 +3072,19 @@ static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.tlb_flush;
+
+       if (!tdp_enabled) {
+               /*
+                * A TLB flush on behalf of the guest is equivalent to
+                * INVPCID(all), toggling CR4.PGE, etc., which requires
+                * a forced sync of the shadow page tables.  Unload the
+                * entire MMU here and the subsequent load will sync the
+                * shadow page tables, and also flush the TLB.
+                */
+               kvm_mmu_unload(vcpu);
+               return;
+       }
+
        static_call(kvm_x86_tlb_flush_guest)(vcpu);
 }
 
@@ -3101,9 +3114,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
         * expensive IPIs.
         */
        if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
+               u8 st_preempted = xchg(&st->preempted, 0);
+
                trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
-                                      st->preempted & KVM_VCPU_FLUSH_TLB);
-               if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+                                      st_preempted & KVM_VCPU_FLUSH_TLB);
+               if (st_preempted & KVM_VCPU_FLUSH_TLB)
                        kvm_vcpu_flush_tlb_guest(vcpu);
        } else {
                st->preempted = 0;
@@ -7091,7 +7106,10 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
 
 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 {
-       emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+       vcpu->arch.hflags = emul_flags;
+       kvm_mmu_reset_context(vcpu);
 }
 
 static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
@@ -8243,6 +8261,7 @@ void kvm_arch_exit(void)
        kvm_x86_ops.hardware_enable = NULL;
        kvm_mmu_module_exit();
        free_percpu(user_return_msrs);
+       kmem_cache_destroy(x86_emulator_cache);
        kmem_cache_destroy(x86_fpu_cache);
 #ifdef CONFIG_KVM_XEN
        static_key_deferred_flush(&kvm_xen_enabled);
index 1c548ad..6bda7f6 100644 (file)
@@ -836,8 +836,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 
        if (si_code == SEGV_PKUERR)
                force_sig_pkuerr((void __user *)address, pkey);
-
-       force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+       else
+               force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 
        local_irq_disable();
 }
index 12c686c..60ade7d 100644 (file)
@@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
        if (!IS_ENABLED(CONFIG_EFI))
                return;
 
-       if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
+       if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
+           (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
+            efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
                desc->flags |= IORES_MAP_ENCRYPTED;
 }
 
index a9639f6..470b202 100644 (file)
@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp)
 #define AMD_SME_BIT    BIT(0)
 #define AMD_SEV_BIT    BIT(1)
 
-       /* Check the SEV MSR whether SEV or SME is enabled */
-       sev_status   = __rdmsr(MSR_AMD64_SEV);
-       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
-
        /*
         * Check for the SME/SEV feature:
         *   CPUID Fn8000_001F[EAX]
@@ -519,11 +515,16 @@ void __init sme_enable(struct boot_params *bp)
        eax = 0x8000001f;
        ecx = 0;
        native_cpuid(&eax, &ebx, &ecx, &edx);
-       if (!(eax & feature_mask))
+       /* Check whether SEV or SME is supported */
+       if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
                return;
 
        me_mask = 1UL << (ebx & 0x3f);
 
+       /* Check the SEV MSR whether SEV or SME is enabled */
+       sev_status   = __rdmsr(MSR_AMD64_SEV);
+       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+
        /* Check if memory encryption is enabled */
        if (feature_mask == AMD_SME_BIT) {
                /*
index 5eb4dc2..e94da74 100644 (file)
@@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 
                /* make sure all non-reserved blocks are inside the limits */
                bi->start = max(bi->start, low);
-               bi->end = min(bi->end, high);
+
+               /* preserve info for non-RAM areas above 'max_pfn': */
+               if (bi->end > high) {
+                       numa_add_memblk_to(bi->nid, high, bi->end,
+                                          &numa_reserved_meminfo);
+                       bi->end = high;
+               }
 
                /* and there's no empty block */
                if (bi->start >= bi->end)
index 02dc646..2edd866 100644 (file)
@@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
 
+#define RS690_LOWER_TOP_OF_DRAM2       0x30
+#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
+#define RS690_UPPER_TOP_OF_DRAM2       0x31
+#define RS690_HTIU_NB_INDEX            0xA8
+#define RS690_HTIU_NB_INDEX_WR_ENABLE  0x100
+#define RS690_HTIU_NB_DATA             0xAC
+
+/*
+ * Some BIOS implementations support RAM above 4GB, but do not configure the
+ * PCI host to respond to bus master accesses for these addresses. These
+ * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
+ * works as expected for addresses below 4GB.
+ *
+ * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
+ * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
+ */
+static void rs690_fix_64bit_dma(struct pci_dev *pdev)
+{
+       u32 val = 0;
+       phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
+
+       if (top_of_dram <= (1ULL << 32))
+               return;
+
+       pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+                               RS690_LOWER_TOP_OF_DRAM2);
+       pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
+
+       if (val)
+               return;
+
+       pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
+
+       pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+               RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+       pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
+
+       pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+               RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+       pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
+               top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
+
 #endif
index 7850111..b15ebfe 100644 (file)
@@ -450,6 +450,18 @@ void __init efi_free_boot_services(void)
                        size -= rm_size;
                }
 
+               /*
+                * Don't free memory under 1M for two reasons:
+                * - BIOS might clobber it
+                * - Crash kernel needs it to be reserved
+                */
+               if (start + size < SZ_1M)
+                       continue;
+               if (start < SZ_1M) {
+                       size -= (SZ_1M - start);
+                       start = SZ_1M;
+               }
+
                memblock_free_late(start, size);
        }
 
index 2e1c1be..6534c92 100644 (file)
@@ -29,14 +29,16 @@ void __init reserve_real_mode(void)
 
        /* Has to be under 1M so we can execute real-mode AP code. */
        mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-       if (!mem) {
+       if (!mem)
                pr_info("No sub-1M memory is available for the trampoline\n");
-               return;
-       }
+       else
+               set_real_mode_mem(mem);
 
-       memblock_reserve(mem, size);
-       set_real_mode_mem(mem);
-       crash_reserve_low_1M();
+       /*
+        * Unconditionally reserve the entire first 1M, see comment in
+        * setup_arch().
+        */
+       memblock_reserve(0, SZ_1M);
 }
 
 static void sme_sev_setup_real_mode(struct trampoline_header *th)
index 6cd7f70..d8a9152 100644 (file)
@@ -233,7 +233,8 @@ async_xor_offs(struct page *dest, unsigned int offset,
                if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
                        src_cnt--;
                        src_list++;
-                       src_offs++;
+                       if (src_offs)
+                               src_offs++;
                }
 
                /* wait for any prerequisite operations */
index 624a267..e5ba979 100644 (file)
@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                }
                break;
 
+       case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
+
+               ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+                                 "***** Address handler %p\n", object));
+
+               acpi_os_delete_mutex(object->address_space.context_mutex);
+               break;
+
        default:
 
                break;
index be7da23..a4bd673 100644 (file)
@@ -330,32 +330,21 @@ static void acpi_bus_osc_negotiate_platform_control(void)
        if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
                return;
 
-       capbuf_ret = context.ret.pointer;
-       if (context.ret.length <= OSC_SUPPORT_DWORD) {
-               kfree(context.ret.pointer);
-               return;
-       }
+       kfree(context.ret.pointer);
 
-       /*
-        * Now run _OSC again with query flag clear and with the caps
-        * supported by both the OS and the platform.
-        */
+       /* Now run _OSC again with query flag clear */
        capbuf[OSC_QUERY_DWORD] = 0;
-       capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
-       kfree(context.ret.pointer);
 
        if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
                return;
 
        capbuf_ret = context.ret.pointer;
-       if (context.ret.length > OSC_SUPPORT_DWORD) {
-               osc_sb_apei_support_acked =
-                       capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
-               osc_pc_lpi_support_confirmed =
-                       capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
-               osc_sb_native_usb4_support_confirmed =
-                       capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
-       }
+       osc_sb_apei_support_acked =
+               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+       osc_pc_lpi_support_confirmed =
+               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+       osc_sb_native_usb4_support_confirmed =
+               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
 
        kfree(context.ret.pointer);
 }
index df38657..3bb2ade 100644 (file)
@@ -1009,10 +1009,8 @@ static void acpi_sleep_hibernate_setup(void)
                return;
 
        acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
-       if (facs) {
+       if (facs)
                s4_hardware_signature = facs->hardware_signature;
-               acpi_put_table((struct acpi_table_header *)facs);
-       }
 }
 #else /* !CONFIG_HIBERNATION */
 static inline void acpi_sleep_hibernate_setup(void) {}
index b31b3af..d5ffaab 100644 (file)
@@ -218,14 +218,14 @@ static int memory_block_offline(struct memory_block *mem)
        struct zone *zone;
        int ret;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-
        /*
         * Unaccount before offlining, such that unpopulated zone and kthreads
         * can properly be torn down in offline_pages().
         */
-       if (nr_vmemmap_pages)
+       if (nr_vmemmap_pages) {
+               zone = page_zone(pfn_to_page(start_pfn));
                adjust_present_page_count(zone, -nr_vmemmap_pages);
+       }
 
        ret = offline_pages(start_pfn + nr_vmemmap_pages,
                            nr_pages - nr_vmemmap_pages);
index d58d68f..76e12f3 100644 (file)
@@ -1879,29 +1879,18 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 
 static int lo_open(struct block_device *bdev, fmode_t mode)
 {
-       struct loop_device *lo;
+       struct loop_device *lo = bdev->bd_disk->private_data;
        int err;
 
-       /*
-        * take loop_ctl_mutex to protect lo pointer from race with
-        * loop_control_ioctl(LOOP_CTL_REMOVE), however, to reduce contention
-        * release it prior to updating lo->lo_refcnt.
-        */
-       err = mutex_lock_killable(&loop_ctl_mutex);
-       if (err)
-               return err;
-       lo = bdev->bd_disk->private_data;
-       if (!lo) {
-               mutex_unlock(&loop_ctl_mutex);
-               return -ENXIO;
-       }
        err = mutex_lock_killable(&lo->lo_mutex);
-       mutex_unlock(&loop_ctl_mutex);
        if (err)
                return err;
-       atomic_inc(&lo->lo_refcnt);
+       if (lo->lo_state == Lo_deleting)
+               err = -ENXIO;
+       else
+               atomic_inc(&lo->lo_refcnt);
        mutex_unlock(&lo->lo_mutex);
-       return 0;
+       return err;
 }
 
 static void lo_release(struct gendisk *disk, fmode_t mode)
@@ -2285,7 +2274,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
                        mutex_unlock(&lo->lo_mutex);
                        break;
                }
-               lo->lo_disk->private_data = NULL;
+               lo->lo_state = Lo_deleting;
                mutex_unlock(&lo->lo_mutex);
                idr_remove(&loop_index_idr, lo->lo_number);
                loop_remove(lo);
index a3c04f3..5beb959 100644 (file)
@@ -22,6 +22,7 @@ enum {
        Lo_unbound,
        Lo_bound,
        Lo_rundown,
+       Lo_deleting,
 };
 
 struct loop_func_table;
index 5d603ef..7f6ba2c 100644 (file)
@@ -388,6 +388,8 @@ static const struct usb_device_id blacklist_table[] = {
        /* Realtek 8822CE Bluetooth devices */
        { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
                                                     BTUSB_WIDEBAND_SPEECH },
+       { USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK |
+                                                    BTUSB_WIDEBAND_SPEECH },
 
        /* Realtek 8852AE Bluetooth devices */
        { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
@@ -2527,10 +2529,17 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
        }
 
        btusb_setup_intel_newgen_get_fw_name(ver, fwname, sizeof(fwname), "sfi");
-       err = request_firmware(&fw, fwname, &hdev->dev);
+       err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
        if (err < 0) {
+               if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       /* Firmware has already been loaded */
+                       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+                       return 0;
+               }
+
                bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
                           fwname, err);
+
                return err;
        }
 
@@ -2680,12 +2689,24 @@ download:
        err = btusb_setup_intel_new_get_fw_name(ver, params, fwname,
                                                sizeof(fwname), "sfi");
        if (err < 0) {
+               if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       /* Firmware has already been loaded */
+                       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+                       return 0;
+               }
+
                bt_dev_err(hdev, "Unsupported Intel firmware naming");
                return -EINVAL;
        }
 
-       err = request_firmware(&fw, fwname, &hdev->dev);
+       err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
        if (err < 0) {
+               if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       /* Firmware has already been loaded */
+                       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+                       return 0;
+               }
+
                bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
                           fwname, err);
                return err;
index 7c810f0..b3357a8 100644 (file)
@@ -311,8 +311,8 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
        MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
        MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
        MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
-       MHI_CHANNEL_CONFIG_UL(32, "AT", 32, 0),
-       MHI_CHANNEL_CONFIG_DL(33, "AT", 32, 0),
+       MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
        MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
        MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
 };
@@ -708,7 +708,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
        struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
        struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
 
-       del_timer(&mhi_pdev->health_check_timer);
+       del_timer_sync(&mhi_pdev->health_check_timer);
        cancel_work_sync(&mhi_pdev->recovery_work);
 
        if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -935,9 +935,43 @@ static int __maybe_unused mhi_pci_resume(struct device *dev)
        return ret;
 }
 
+static int __maybe_unused mhi_pci_freeze(struct device *dev)
+{
+       struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+       struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+       /* We want to stop all operations, hibernation does not guarantee that
+        * device will be in the same state as before freezing, especially if
+        * the intermediate restore kernel reinitializes MHI device with new
+        * context.
+        */
+       if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+               mhi_power_down(mhi_cntrl, false);
+               mhi_unprepare_after_power_down(mhi_cntrl);
+       }
+
+       return 0;
+}
+
+static int __maybe_unused mhi_pci_restore(struct device *dev)
+{
+       struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+
+       /* Reinitialize the device */
+       queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+       return 0;
+}
+
 static const struct dev_pm_ops mhi_pci_pm_ops = {
        SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
-       SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+       .suspend = mhi_pci_suspend,
+       .resume = mhi_pci_resume,
+       .freeze = mhi_pci_freeze,
+       .thaw = mhi_pci_restore,
+       .restore = mhi_pci_restore,
+#endif
 };
 
 static struct pci_driver mhi_pci_driver = {
index 5fae60f..38cb116 100644 (file)
@@ -1334,6 +1334,34 @@ err_allow_idle:
        return error;
 }
 
+static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
+{
+       struct device *dev = ddata->dev;
+       int error;
+
+       /* Disable target module if it is enabled */
+       if (ddata->enabled) {
+               error = sysc_runtime_suspend(dev);
+               if (error)
+                       dev_warn(dev, "reinit suspend failed: %i\n", error);
+       }
+
+       /* Enable target module */
+       error = sysc_runtime_resume(dev);
+       if (error)
+               dev_warn(dev, "reinit resume failed: %i\n", error);
+
+       if (leave_enabled)
+               return error;
+
+       /* Disable target module if no leave_enabled was set */
+       error = sysc_runtime_suspend(dev);
+       if (error)
+               dev_warn(dev, "reinit suspend failed: %i\n", error);
+
+       return error;
+}
+
 static int __maybe_unused sysc_noirq_suspend(struct device *dev)
 {
        struct sysc *ddata;
@@ -1344,12 +1372,18 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
            (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
                return 0;
 
-       return pm_runtime_force_suspend(dev);
+       if (!ddata->enabled)
+               return 0;
+
+       ddata->needs_resume = 1;
+
+       return sysc_runtime_suspend(dev);
 }
 
 static int __maybe_unused sysc_noirq_resume(struct device *dev)
 {
        struct sysc *ddata;
+       int error = 0;
 
        ddata = dev_get_drvdata(dev);
 
@@ -1357,7 +1391,19 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
            (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
                return 0;
 
-       return pm_runtime_force_resume(dev);
+       if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
+               error = sysc_reinit_module(ddata, ddata->needs_resume);
+               if (error)
+                       dev_warn(dev, "noirq_resume failed: %i\n", error);
+       } else if (ddata->needs_resume) {
+               error = sysc_runtime_resume(dev);
+               if (error)
+                       dev_warn(dev, "noirq_resume failed: %i\n", error);
+       }
+
+       ddata->needs_resume = 0;
+
+       return error;
 }
 
 static const struct dev_pm_ops sysc_pm_ops = {
@@ -1408,9 +1454,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
        /* Uarts on omap4 and later */
        SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
-                  SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
        SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
-                  SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
 
        /* Quirks that need to be set based on the module address */
        SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
@@ -1459,6 +1505,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+       SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
@@ -1466,7 +1514,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
                   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
-                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+                  SYSC_QUIRK_REINIT_ON_RESUME),
        SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
                   SYSC_MODULE_QUIRK_WDT),
        /* PRUSS on am3, am4 and am5 */
@@ -1524,7 +1573,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
        SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
        SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
-       SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff, 0),
        SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
        SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
        SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
index a5c5f70..e65e0a4 100644 (file)
@@ -19,16 +19,6 @@ config ACPI_CPPC_CPUFREQ
 
          If in doubt, say N.
 
-config ACPI_CPPC_CPUFREQ_FIE
-       bool "Frequency Invariance support for CPPC cpufreq driver"
-       depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
-       default y
-       help
-         This extends frequency invariance support in the CPPC cpufreq driver,
-         by using CPPC delivered and reference performance counters.
-
-         If in doubt, say N.
-
 config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
        tristate "Allwinner nvmem based SUN50I CPUFreq driver"
        depends on ARCH_SUNXI
index 3848b4c..2f769b1 100644 (file)
 
 #define pr_fmt(fmt)    "CPPC Cpufreq:" fmt
 
-#include <linux/arch_topology.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/dmi.h>
-#include <linux/irq_work.h>
-#include <linux/kthread.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
-#include <uapi/linux/sched/types.h>
 
 #include <asm/unaligned.h>
 
@@ -61,204 +57,6 @@ static struct cppc_workaround_oem_info wa_info[] = {
        }
 };
 
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
-
-/* Frequency invariance support */
-struct cppc_freq_invariance {
-       int cpu;
-       struct irq_work irq_work;
-       struct kthread_work work;
-       struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
-       struct cppc_cpudata *cpu_data;
-};
-
-static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
-static struct kthread_worker *kworker_fie;
-static bool fie_disabled;
-
-static struct cpufreq_driver cppc_cpufreq_driver;
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
-                                struct cppc_perf_fb_ctrs fb_ctrs_t0,
-                                struct cppc_perf_fb_ctrs fb_ctrs_t1);
-
-/**
- * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
- * @work: The work item.
- *
- * The CPPC driver register itself with the topology core to provide its own
- * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
- * gets called by the scheduler on every tick.
- *
- * Note that the arch specific counters have higher priority than CPPC counters,
- * if available, though the CPPC driver doesn't need to have any special
- * handling for that.
- *
- * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
- * reach here from hard-irq context), which then schedules a normal work item
- * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
- * based on the counter updates since the last tick.
- */
-static void cppc_scale_freq_workfn(struct kthread_work *work)
-{
-       struct cppc_freq_invariance *cppc_fi;
-       struct cppc_perf_fb_ctrs fb_ctrs = {0};
-       struct cppc_cpudata *cpu_data;
-       unsigned long local_freq_scale;
-       u64 perf;
-
-       cppc_fi = container_of(work, struct cppc_freq_invariance, work);
-       cpu_data = cppc_fi->cpu_data;
-
-       if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
-               pr_warn("%s: failed to read perf counters\n", __func__);
-               return;
-       }
-
-       cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
-       perf = cppc_perf_from_fbctrs(cpu_data, cppc_fi->prev_perf_fb_ctrs,
-                                    fb_ctrs);
-
-       perf <<= SCHED_CAPACITY_SHIFT;
-       local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
-       if (WARN_ON(local_freq_scale > 1024))
-               local_freq_scale = 1024;
-
-       per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
-}
-
-static void cppc_irq_work(struct irq_work *irq_work)
-{
-       struct cppc_freq_invariance *cppc_fi;
-
-       cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
-       kthread_queue_work(kworker_fie, &cppc_fi->work);
-}
-
-static void cppc_scale_freq_tick(void)
-{
-       struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
-
-       /*
-        * cppc_get_perf_ctrs() can potentially sleep, call that from the right
-        * context.
-        */
-       irq_work_queue(&cppc_fi->irq_work);
-}
-
-static struct scale_freq_data cppc_sftd = {
-       .source = SCALE_FREQ_SOURCE_CPPC,
-       .set_freq_scale = cppc_scale_freq_tick,
-};
-
-static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
-                                            struct cppc_cpudata *cpu_data)
-{
-       struct cppc_perf_fb_ctrs fb_ctrs = {0};
-       struct cppc_freq_invariance *cppc_fi;
-       int i, ret;
-
-       if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-               return;
-
-       if (fie_disabled)
-               return;
-
-       for_each_cpu(i, policy->cpus) {
-               cppc_fi = &per_cpu(cppc_freq_inv, i);
-               cppc_fi->cpu = i;
-               cppc_fi->cpu_data = cpu_data;
-               kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
-               init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
-
-               ret = cppc_get_perf_ctrs(i, &fb_ctrs);
-               if (ret) {
-                       pr_warn("%s: failed to read perf counters: %d\n",
-                               __func__, ret);
-                       fie_disabled = true;
-               } else {
-                       cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
-               }
-       }
-}
-
-static void __init cppc_freq_invariance_init(void)
-{
-       struct sched_attr attr = {
-               .size           = sizeof(struct sched_attr),
-               .sched_policy   = SCHED_DEADLINE,
-               .sched_nice     = 0,
-               .sched_priority = 0,
-               /*
-                * Fake (unused) bandwidth; workaround to "fix"
-                * priority inheritance.
-                */
-               .sched_runtime  = 1000000,
-               .sched_deadline = 10000000,
-               .sched_period   = 10000000,
-       };
-       int ret;
-
-       if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-               return;
-
-       if (fie_disabled)
-               return;
-
-       kworker_fie = kthread_create_worker(0, "cppc_fie");
-       if (IS_ERR(kworker_fie))
-               return;
-
-       ret = sched_setattr_nocheck(kworker_fie->task, &attr);
-       if (ret) {
-               pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
-                       ret);
-               kthread_destroy_worker(kworker_fie);
-               return;
-       }
-
-       /* Register for freq-invariance */
-       topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
-}
-
-static void cppc_freq_invariance_exit(void)
-{
-       struct cppc_freq_invariance *cppc_fi;
-       int i;
-
-       if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-               return;
-
-       if (fie_disabled)
-               return;
-
-       topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask);
-
-       for_each_possible_cpu(i) {
-               cppc_fi = &per_cpu(cppc_freq_inv, i);
-               irq_work_sync(&cppc_fi->irq_work);
-       }
-
-       kthread_destroy_worker(kworker_fie);
-       kworker_fie = NULL;
-}
-
-#else
-static inline void
-cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
-                                struct cppc_cpudata *cpu_data)
-{
-}
-
-static inline void cppc_freq_invariance_init(void)
-{
-}
-
-static inline void cppc_freq_invariance_exit(void)
-{
-}
-#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-
 /* Callback function used to retrieve the max frequency from DMI */
 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
 {
@@ -547,12 +345,9 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
        cpu_data->perf_ctrls.desired_perf =  caps->highest_perf;
 
        ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
-       if (ret) {
+       if (ret)
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                         caps->highest_perf, cpu, ret);
-       } else {
-               cppc_freq_invariance_policy_init(policy, cpu_data);
-       }
 
        return ret;
 }
@@ -565,12 +360,12 @@ static inline u64 get_delta(u64 t1, u64 t0)
        return (u32)t1 - (u32)t0;
 }
 
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
-                                struct cppc_perf_fb_ctrs fb_ctrs_t0,
-                                struct cppc_perf_fb_ctrs fb_ctrs_t1)
+static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
+                                    struct cppc_perf_fb_ctrs fb_ctrs_t0,
+                                    struct cppc_perf_fb_ctrs fb_ctrs_t1)
 {
        u64 delta_reference, delta_delivered;
-       u64 reference_perf;
+       u64 reference_perf, delivered_perf;
 
        reference_perf = fb_ctrs_t0.reference_perf;
 
@@ -579,21 +374,12 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
        delta_delivered = get_delta(fb_ctrs_t1.delivered,
                                    fb_ctrs_t0.delivered);
 
-       /* Check to avoid divide-by zero and invalid delivered_perf */
-       if (!delta_reference || !delta_delivered)
-               return cpu_data->perf_ctrls.desired_perf;
-
-       return (reference_perf * delta_delivered) / delta_reference;
-}
-
-static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
-                                    struct cppc_perf_fb_ctrs fb_ctrs_t0,
-                                    struct cppc_perf_fb_ctrs fb_ctrs_t1)
-{
-       u64 delivered_perf;
-
-       delivered_perf = cppc_perf_from_fbctrs(cpu_data, fb_ctrs_t0,
-                                              fb_ctrs_t1);
+       /* Check to avoid divide-by zero */
+       if (delta_reference || delta_delivered)
+               delivered_perf = (reference_perf * delta_delivered) /
+                                       delta_reference;
+       else
+               delivered_perf = cpu_data->perf_ctrls.desired_perf;
 
        return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
 }
@@ -718,8 +504,6 @@ static void cppc_check_hisi_workaround(void)
 
 static int __init cppc_cpufreq_init(void)
 {
-       int ret;
-
        if ((acpi_disabled) || !acpi_cpc_valid())
                return -ENODEV;
 
@@ -727,11 +511,7 @@ static int __init cppc_cpufreq_init(void)
 
        cppc_check_hisi_workaround();
 
-       ret = cpufreq_register_driver(&cppc_cpufreq_driver);
-       if (!ret)
-               cppc_freq_invariance_init();
-
-       return ret;
+       return cpufreq_register_driver(&cppc_cpufreq_driver);
 }
 
 static inline void free_cpu_data(void)
@@ -748,7 +528,6 @@ static inline void free_cpu_data(void)
 
 static void __exit cppc_cpufreq_exit(void)
 {
-       cppc_freq_invariance_exit();
        cpufreq_unregister_driver(&cppc_cpufreq_driver);
 
        free_cpu_data();
index 6ab9d9a..39b5b46 100644 (file)
@@ -59,6 +59,7 @@ config DMA_OF
 #devices
 config ALTERA_MSGDMA
        tristate "Altera / Intel mSGDMA Engine"
+       depends on HAS_IOMEM
        select DMA_ENGINE
        help
          Enable support for Altera / Intel mSGDMA controller.
@@ -701,6 +702,7 @@ config XILINX_ZYNQMP_DMA
 
 config XILINX_ZYNQMP_DPDMA
        tristate "Xilinx DPDMA Engine"
+       depends on HAS_IOMEM && OF
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
        help
index 4ec909e..4ae0579 100644 (file)
@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
        }
 
        if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+               err = -EINVAL;
                dev_err(dev, "DPDMAI major version mismatch\n"
                             "Found %u.%u, supported version is %u.%u\n",
                                priv->dpdmai_attr.version.major,
@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
        }
 
        if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+               err = -EINVAL;
                dev_err(dev, "DPDMAI minor version mismatch\n"
                             "Found %u.%u, supported version is %u.%u\n",
                                priv->dpdmai_attr.version.major,
@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
                ppriv->store =
                        dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
                if (!ppriv->store) {
+                       err = -ENOMEM;
                        dev_err(dev, "dpaa2_io_store_create() failed\n");
                        goto err_store;
                }
index 302cba5..d4419bf 100644 (file)
@@ -110,6 +110,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
                pasid = iommu_sva_get_pasid(sva);
                if (pasid == IOMMU_PASID_INVALID) {
                        iommu_sva_unbind_device(sva);
+                       rc = -EINVAL;
                        goto failed;
                }
 
index 2a926be..442d55c 100644 (file)
@@ -168,6 +168,32 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
        return rc;
 }
 
+static void idxd_cleanup_interrupts(struct idxd_device *idxd)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       struct idxd_irq_entry *irq_entry;
+       int i, msixcnt;
+
+       msixcnt = pci_msix_vec_count(pdev);
+       if (msixcnt <= 0)
+               return;
+
+       irq_entry = &idxd->irq_entries[0];
+       free_irq(irq_entry->vector, irq_entry);
+
+       for (i = 1; i < msixcnt; i++) {
+
+               irq_entry = &idxd->irq_entries[i];
+               if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
+                       idxd_device_release_int_handle(idxd, idxd->int_handles[i],
+                                                      IDXD_IRQ_MSIX);
+               free_irq(irq_entry->vector, irq_entry);
+       }
+
+       idxd_mask_error_interrupts(idxd);
+       pci_free_irq_vectors(pdev);
+}
+
 static int idxd_setup_wqs(struct idxd_device *idxd)
 {
        struct device *dev = &idxd->pdev->dev;
@@ -242,6 +268,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
                engine->idxd = idxd;
                device_initialize(&engine->conf_dev);
                engine->conf_dev.parent = &idxd->conf_dev;
+               engine->conf_dev.bus = &dsa_bus_type;
                engine->conf_dev.type = &idxd_engine_device_type;
                rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
                if (rc < 0) {
@@ -303,6 +330,19 @@ static int idxd_setup_groups(struct idxd_device *idxd)
        return rc;
 }
 
+static void idxd_cleanup_internals(struct idxd_device *idxd)
+{
+       int i;
+
+       for (i = 0; i < idxd->max_groups; i++)
+               put_device(&idxd->groups[i]->conf_dev);
+       for (i = 0; i < idxd->max_engines; i++)
+               put_device(&idxd->engines[i]->conf_dev);
+       for (i = 0; i < idxd->max_wqs; i++)
+               put_device(&idxd->wqs[i]->conf_dev);
+       destroy_workqueue(idxd->wq);
+}
+
 static int idxd_setup_internals(struct idxd_device *idxd)
 {
        struct device *dev = &idxd->pdev->dev;
@@ -531,12 +571,12 @@ static int idxd_probe(struct idxd_device *idxd)
                dev_dbg(dev, "Loading RO device config\n");
                rc = idxd_device_load_config(idxd);
                if (rc < 0)
-                       goto err;
+                       goto err_config;
        }
 
        rc = idxd_setup_interrupts(idxd);
        if (rc)
-               goto err;
+               goto err_config;
 
        dev_dbg(dev, "IDXD interrupt setup complete.\n");
 
@@ -549,6 +589,8 @@ static int idxd_probe(struct idxd_device *idxd)
        dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
        return 0;
 
+ err_config:
+       idxd_cleanup_internals(idxd);
  err:
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
@@ -556,6 +598,18 @@ static int idxd_probe(struct idxd_device *idxd)
        return rc;
 }
 
+static void idxd_cleanup(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+
+       perfmon_pmu_remove(idxd);
+       idxd_cleanup_interrupts(idxd);
+       idxd_cleanup_internals(idxd);
+       if (device_pasid_enabled(idxd))
+               idxd_disable_system_pasid(idxd);
+       iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+}
+
 static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct device *dev = &pdev->dev;
@@ -608,7 +662,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        rc = idxd_register_devices(idxd);
        if (rc) {
                dev_err(dev, "IDXD sysfs setup failed\n");
-               goto err;
+               goto err_dev_register;
        }
 
        idxd->state = IDXD_DEV_CONF_READY;
@@ -618,6 +672,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        return 0;
 
+ err_dev_register:
+       idxd_cleanup(idxd);
  err:
        pci_iounmap(pdev, idxd->reg_base);
  err_iomap:
@@ -745,12 +801,12 @@ static int __init idxd_init_module(void)
         * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
         * enumerating the device. We can not utilize it.
         */
-       if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+       if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
                pr_warn("idxd driver failed to load without MOVDIR64B.\n");
                return -ENODEV;
        }
 
-       if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+       if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
                pr_warn("Platform does not have ENQCMD(S) support.\n");
        else
                support_enqcmd = true;
@@ -787,6 +843,7 @@ module_init(idxd_init_module);
 
 static void __exit idxd_exit_module(void)
 {
+       idxd_unregister_driver();
        pci_unregister_driver(&idxd_pci_driver);
        idxd_cdev_remove();
        idxd_unregister_bus_type();
index 0d5c42f..97d9a6f 100644 (file)
@@ -230,7 +230,7 @@ out:
 }
 
 /**
- * ipu_irq_map() - map an IPU interrupt source to an IRQ number
+ * ipu_irq_unmap() - unmap an IPU interrupt source
  * @source:    interrupt source bit position (see ipu_irq_map())
  * @return:    0 or negative error code
  */
index 27c0735..375e7e6 100644 (file)
@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
 
 static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
 {
-       struct dma_chan *chan = vd->tx.chan;
-       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
-
-       kfree(c->desc);
+       kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
 }
 
 static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
 
 static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
 {
-       struct mtk_uart_apdma_desc *d = c->desc;
-
        mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
-
-       list_del(&d->vd.node);
-       vchan_cookie_complete(&d->vd);
 }
 
 static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
 
        c->rx_status = d->avail_len - cnt;
        mtk_uart_apdma_write(c, VFF_RPT, wg);
+}
 
-       list_del(&d->vd.node);
-       vchan_cookie_complete(&d->vd);
+static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
+{
+       struct mtk_uart_apdma_desc *d = c->desc;
+
+       if (d) {
+               list_del(&d->vd.node);
+               vchan_cookie_complete(&d->vd);
+               c->desc = NULL;
+       }
 }
 
 static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
                mtk_uart_apdma_rx_handler(c);
        else if (c->dir == DMA_MEM_TO_DEV)
                mtk_uart_apdma_tx_handler(c);
+       mtk_uart_apdma_chan_complete_handler(c);
        spin_unlock_irqrestore(&c->vc.lock, flags);
 
        return IRQ_HANDLED;
@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
                return NULL;
 
        /* Now allocate and setup the descriptor */
-       d = kzalloc(sizeof(*d), GFP_ATOMIC);
+       d = kzalloc(sizeof(*d), GFP_NOWAIT);
        if (!d)
                return NULL;
 
@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
        unsigned long flags;
 
        spin_lock_irqsave(&c->vc.lock, flags);
-       if (vchan_issue_pending(&c->vc)) {
+       if (vchan_issue_pending(&c->vc) && !c->desc) {
                vd = vchan_next_desc(&c->vc);
                c->desc = to_mtk_uart_apdma_desc(&vd->tx);
 
index fd8d2bc..110de8a 100644 (file)
@@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
        for (i = 0; i < len / period_len; i++) {
                desc = pl330_get_desc(pch);
                if (!desc) {
+                       unsigned long iflags;
+
                        dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
                                __func__, __LINE__);
 
                        if (!first)
                                return NULL;
 
-                       spin_lock_irqsave(&pl330->pool_lock, flags);
+                       spin_lock_irqsave(&pl330->pool_lock, iflags);
 
                        while (!list_empty(&first->node)) {
                                desc = list_entry(first->node.next,
@@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 
                        list_move_tail(&first->node, &pl330->desc_pool);
 
-                       spin_unlock_irqrestore(&pl330->pool_lock, flags);
+                       spin_unlock_irqrestore(&pl330->pool_lock, iflags);
 
                        return NULL;
                }
index 365f94e..3f926a6 100644 (file)
@@ -33,6 +33,7 @@ config QCOM_GPI_DMA
 
 config QCOM_HIDMA_MGMT
        tristate "Qualcomm Technologies HIDMA Management support"
+       depends on HAS_IOMEM
        select DMA_ENGINE
        help
          Enable support for the Qualcomm Technologies HIDMA Management.
index f8ffa02..ba46a0a 100644 (file)
@@ -1,5 +1,6 @@
 config SF_PDMA
        tristate "Sifive PDMA controller driver"
+       depends on HAS_IOMEM
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
        help
index d530c1b..6885b3d 100644 (file)
@@ -1913,7 +1913,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 
        /* Enable runtime PM and initialize the device. */
        pm_runtime_enable(&pdev->dev);
-       ret = pm_runtime_get_sync(&pdev->dev);
+       ret = pm_runtime_resume_and_get(&pdev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
                return ret;
index 265d7c0..e182739 100644 (file)
@@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
 
        kfree(base->lcla_pool.base_unaligned);
 
+       if (base->lcpa_base)
+               iounmap(base->lcpa_base);
+
        if (base->phy_lcpa)
                release_mem_region(base->phy_lcpa,
                                   base->lcpa_size);
index 36ba8b4..18cbd1e 100644 (file)
@@ -1452,7 +1452,7 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
                return -ENOMEM;
        }
 
-       ret = pm_runtime_get_sync(dmadev->ddev.dev);
+       ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
        if (ret < 0)
                return ret;
 
@@ -1718,7 +1718,7 @@ static int stm32_mdma_pm_suspend(struct device *dev)
        u32 ccr, id;
        int ret;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
index 70b29bd..6c70980 100644 (file)
 #define XILINX_DPDMA_CH_VDO                            0x020
 #define XILINX_DPDMA_CH_PYLD_SZ                                0x024
 #define XILINX_DPDMA_CH_DESC_ID                                0x028
+#define XILINX_DPDMA_CH_DESC_ID_MASK                   GENMASK(15, 0)
 
 /* DPDMA descriptor fields */
 #define XILINX_DPDMA_DESC_CONTROL_PREEMBLE             0xa5
@@ -866,7 +867,8 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
         * will be used, but it should be enough.
         */
        list_for_each_entry(sw_desc, &desc->descriptors, node)
-               sw_desc->hw.desc_id = desc->vdesc.tx.cookie;
+               sw_desc->hw.desc_id = desc->vdesc.tx.cookie
+                                   & XILINX_DPDMA_CH_DESC_ID_MASK;
 
        sw_desc = list_first_entry(&desc->descriptors,
                                   struct xilinx_dpdma_sw_desc, node);
@@ -1086,7 +1088,8 @@ static void xilinx_dpdma_chan_vsync_irq(struct  xilinx_dpdma_chan *chan)
        if (!chan->running || !pending)
                goto out;
 
-       desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);
+       desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
+               & XILINX_DPDMA_CH_DESC_ID_MASK;
 
        /* If the retrigger raced with vsync, retry at the next frame. */
        sw_desc = list_first_entry(&pending->descriptors,
@@ -1459,7 +1462,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
  */
 static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
 {
-       dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+       dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
        dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
 }
 
@@ -1596,6 +1599,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
        return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
 }
 
+static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
+{
+       unsigned int i;
+       void __iomem *reg;
+
+       /* Disable all interrupts */
+       xilinx_dpdma_disable_irq(xdev);
+
+       /* Stop all channels */
+       for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
+               reg = xdev->reg + XILINX_DPDMA_CH_BASE
+                               + XILINX_DPDMA_CH_OFFSET * i;
+               dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+       }
+
+       /* Clear the interrupt status registers */
+       dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
+       dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
+}
+
 static int xilinx_dpdma_probe(struct platform_device *pdev)
 {
        struct xilinx_dpdma_device *xdev;
@@ -1622,6 +1645,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
        if (IS_ERR(xdev->reg))
                return PTR_ERR(xdev->reg);
 
+       dpdma_hw_init(xdev);
+
        xdev->irq = platform_get_irq(pdev, 0);
        if (xdev->irq < 0) {
                dev_err(xdev->dev, "failed to get platform irq\n");
index d841956..5fecf5a 100644 (file)
@@ -468,7 +468,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
        struct zynqmp_dma_desc_sw *desc;
        int i, ret;
 
-       ret = pm_runtime_get_sync(chan->dev);
+       ret = pm_runtime_resume_and_get(chan->dev);
        if (ret < 0)
                return ret;
 
index e15d484..ea7ca74 100644 (file)
@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
        if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
                return 0;
 
-       n = 0;
-       len = CPER_REC_LEN - 1;
+       len = CPER_REC_LEN;
        dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
        if (bank && device)
                n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
                             "DIMM location: not present. DMI handle: 0x%.4x ",
                             mem->mem_dev_handle);
 
-       msg[n] = '\0';
        return n;
 }
 
index bb042ab..e901f85 100644 (file)
@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
        BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
        BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
 
+       if (!fdt)
+               return 0;
+
        for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
                node = fdt_path_offset(fdt, dt_params[i].path);
                if (node < 0)
index 4e81c60..dd95f33 100644 (file)
@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
                return 0;
 
        /* Skip any leading slashes */
-       while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+       while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
                i++;
 
        while (--result_len > 0 && i < cmdline_len) {
index 5737cb0..0a9aba5 100644 (file)
@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
                return false;
        }
 
-       if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
-               pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
-               return false;
-       }
-
        if (PAGE_SIZE > EFI_PAGE_SIZE &&
            (!PAGE_ALIGNED(in->phys_addr) ||
             !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
index 1cbce59..97e6cae 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/slab.h>
 #include <linux/of_device.h>
 
-#define WCD_PIN_MASK(p) BIT(p - 1)
+#define WCD_PIN_MASK(p) BIT(p)
 #define WCD_REG_DIR_CTL_OFFSET 0x42
 #define WCD_REG_VAL_CTL_OFFSET 0x43
 #define WCD934X_NPINS          5
index 0350205..6819fe5 100644 (file)
@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 {
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
-       unsigned long ras_counter;
 
        if (!fpriv)
                return -EINVAL;
@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
 
-       /*query ue count*/
-       ras_counter = amdgpu_ras_query_error_count(adev, false);
-       /*ras counter is monotonic increasing*/
-       if (ras_counter != ctx->ras_counter_ue) {
-               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
-               ctx->ras_counter_ue = ras_counter;
-       }
-
-       /*query ce count*/
-       ras_counter = amdgpu_ras_query_error_count(adev, true);
-       if (ras_counter != ctx->ras_counter_ce) {
-               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
-               ctx->ras_counter_ce = ras_counter;
-       }
-
        mutex_unlock(&mgr->lock);
        return 0;
 }
index 66ddfe4..57ec108 100644 (file)
@@ -3118,7 +3118,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
  */
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 {
-       if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
+       if (amdgpu_sriov_vf(adev) || 
+           adev->enable_virtual_display ||
+           (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
                return false;
 
        return amdgpu_device_asic_has_dc_support(adev->asic_type);
index 8a1fb8b..c13985f 100644 (file)
@@ -1057,7 +1057,7 @@ int amdgpu_display_gem_fb_init(struct drm_device *dev,
 
        return 0;
 err:
-       drm_err(dev, "Failed to init gem fb: %d\n", ret);
+       drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
        rfb->base.obj[0] = NULL;
        return ret;
 }
@@ -1094,7 +1094,7 @@ int amdgpu_display_gem_fb_verify_and_init(
 
        return 0;
 err:
-       drm_err(dev, "Failed to verify and init gem fb: %d\n", ret);
+       drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
        rfb->base.obj[0] = NULL;
        return ret;
 }
index 8f4a8f8..39b6c6b 100644 (file)
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 {
        unsigned char buff[34];
-       int addrptr = 0, size = 0;
+       int addrptr, size;
+       int len;
 
        if (!is_fru_eeprom_supported(adev))
                return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        /* If algo exists, it means that the i2c_adapter's initialized */
        if (!adev->pm.smu_i2c.algo) {
                DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
-               return 0;
+               return -ENODEV;
        }
 
        /* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
        /* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product name, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Product name should only be 32 characters. Any more,
         * and something could be wrong. Cap it at 32 to be safe
         */
-       if (size > 32) {
+       if (len >= sizeof(adev->product_name)) {
                DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
-               size = 32;
+               len = sizeof(adev->product_name) - 1;
        }
        /* Start at 2 due to buff using fields 0 and 1 for the address */
-       memcpy(adev->product_name, &buff[2], size);
-       adev->product_name[size] = '\0';
+       memcpy(adev->product_name, &buff[2], len);
+       adev->product_name[len] = '\0';
 
        addrptr += size + 1;
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product number, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Product number should only be 16 characters. Any more,
         * and something could be wrong. Cap it at 16 to be safe
         */
-       if (size > 16) {
+       if (len >= sizeof(adev->product_number)) {
                DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
-               size = 16;
+               len = sizeof(adev->product_number) - 1;
        }
-       memcpy(adev->product_number, &buff[2], size);
-       adev->product_number[size] = '\0';
+       memcpy(adev->product_number, &buff[2], len);
+       adev->product_number[len] = '\0';
 
        addrptr += size + 1;
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product version, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
        addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Serial number should only be 16 characters. Any more,
         * and something could be wrong. Cap it at 16 to be safe
         */
-       if (size > 16) {
+       if (len >= sizeof(adev->serial)) {
                DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
-               size = 16;
+               len = sizeof(adev->serial) - 1;
        }
-       memcpy(adev->serial, &buff[2], size);
-       adev->serial[size] = '\0';
+       memcpy(adev->serial, &buff[2], len);
+       adev->serial[len] = '\0';
 
        return 0;
 }
index 1345f7e..f9434bc 100644 (file)
@@ -100,7 +100,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
                kfree(ubo->metadata);
        }
 
-       kfree(bo);
+       kvfree(bo);
 }
 
 /**
@@ -552,7 +552,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
 
        *bo_ptr = NULL;
-       bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
+       bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
index 46a5328..60aa99a 100644 (file)
@@ -76,6 +76,7 @@ struct psp_ring
        uint64_t                        ring_mem_mc_addr;
        void                            *ring_mem_handle;
        uint32_t                        ring_size;
+       uint32_t                        ring_wptr;
 };
 
 /* More registers may will be supported */
index 7ce76a6..327b1f8 100644 (file)
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid              0x2030
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX     0
 
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid               0x4ca5
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX      1
+
 #define GFX_RLCG_GC_WRITE_OLD  (0x8 << 28)
 #define GFX_RLCG_GC_WRITE      (0x0 << 28)
 #define GFX_RLCG_GC_READ       (0x1 << 28)
@@ -1480,8 +1483,15 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
                       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
        scratch_reg3 = adev->rmmio +
                       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
-       spare_int = adev->rmmio +
-                   (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+       if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+               spare_int = adev->rmmio +
+                           (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
+                            + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
+       } else {
+               spare_int = adev->rmmio +
+                           (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+       }
 
        grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
        grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
@@ -6861,8 +6871,12 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                        (adev->doorbell_index.kiq * 2) << 2);
+               /* If GC has entered CGPG, ringing doorbell > first page doesn't
+                * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
+                * this issue.
+                */
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-                       (adev->doorbell_index.userqueue_end * 2) << 2);
+                       (adev->doorbell.size - 4));
        }
 
        WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
@@ -7349,9 +7363,15 @@ static int gfx_v10_0_hw_fini(void *handle)
        if (amdgpu_sriov_vf(adev)) {
                gfx_v10_0_cp_gfx_enable(adev, false);
                /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
-               tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
-               tmp &= 0xffffff00;
-               WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+               if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+                       tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
+                       tmp &= 0xffffff00;
+                       WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+               } else {
+                       tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
+                       tmp &= 0xffffff00;
+                       WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+               }
 
                return 0;
        }
index 516467e..c09225d 100644 (file)
@@ -3673,8 +3673,12 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                                        (adev->doorbell_index.kiq * 2) << 2);
+               /* If GC has entered CGPG, ringing doorbell > first page doesn't
+                * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
+                * this issue.
+                */
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-                                       (adev->doorbell_index.userqueue_end * 2) << 2);
+                                       (adev->doorbell.size - 4));
        }
 
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
index 589410c..02bba1f 100644 (file)
@@ -720,7 +720,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
 
        if (amdgpu_sriov_vf(adev))
-               data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+               data = psp->km_ring.ring_wptr;
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
@@ -734,6 +734,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
        if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+               psp->km_ring.ring_wptr = value;
        } else
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
index f2e725f..908664a 100644 (file)
@@ -379,7 +379,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
 
        if (amdgpu_sriov_vf(adev))
-               data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+               data = psp->km_ring.ring_wptr;
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
        return data;
@@ -394,6 +394,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
                /* send interrupt to PSP for SRIOV ring write pointer update */
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
                        GFX_CTRL_CMD_ID_CONSUME_CMD);
+               psp->km_ring.ring_wptr = value;
        } else
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
index 2bab9c7..cf3803f 100644 (file)
@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unpin(bo);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
index 389eff9..652cc1a 100644 (file)
@@ -925,7 +925,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }
 
-       adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+       if (!adev->dm.dc->ctx->dmub_srv)
+               adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
@@ -1954,7 +1955,6 @@ static int dm_suspend(void *handle)
 
        amdgpu_dm_irq_suspend(adev);
 
-
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
        return 0;
@@ -5500,7 +5500,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        struct drm_display_mode saved_mode;
        struct drm_display_mode *freesync_mode = NULL;
        bool native_mode_found = false;
-       bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+       bool recalculate_timing = false;
+       bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -5563,7 +5564,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing |= amdgpu_freesync_vid_mode &&
+               recalculate_timing = amdgpu_freesync_vid_mode &&
                                 is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -5571,11 +5572,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                        mode = *freesync_mode;
                } else {
                        decide_crtc_timing_for_drm_display_mode(
-                               &mode, preferred_mode,
-                               dm_state ? (dm_state->scaling != RMX_OFF) : false);
-               }
+                               &mode, preferred_mode, scale);
 
-               preferred_refresh = drm_mode_vrefresh(preferred_mode);
+                       preferred_refresh = drm_mode_vrefresh(preferred_mode);
+               }
        }
 
        if (recalculate_timing)
@@ -5587,7 +5587,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        * If scaling is enabled and refresh rate didn't change
        * we copy the vic and polarities of the old timings
        */
-       if (!recalculate_timing || mode_refresh != preferred_refresh)
+       if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(
                        stream, &mode, &aconnector->base, con_state, NULL,
                        requested_bpc);
@@ -9854,7 +9854,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 
        if (cursor_scale_w != primary_scale_w ||
            cursor_scale_h != primary_scale_h) {
-               DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
+               drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
                return -EINVAL;
        }
 
@@ -9891,7 +9891,7 @@ static int validate_overlay(struct drm_atomic_state *state)
        int i;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
-       struct drm_plane_state *primary_state, *overlay_state = NULL;
+       struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
 
        /* Check if primary plane is contained inside overlay */
        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9921,6 +9921,14 @@ static int validate_overlay(struct drm_atomic_state *state)
        if (!primary_state->crtc)
                return 0;
 
+       /* check if cursor plane is enabled */
+       cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
+       if (IS_ERR(cursor_state))
+               return PTR_ERR(cursor_state);
+
+       if (drm_atomic_plane_disabling(plane->state, cursor_state))
+               return 0;
+
        /* Perform the bounds check to ensure the overlay plane covers the primary */
        if (primary_state->crtc_x < overlay_state->crtc_x ||
            primary_state->crtc_y < overlay_state->crtc_y ||
index 527e56c..8357aa3 100644 (file)
@@ -3236,7 +3236,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
        voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
        dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
 
-       if (voltage_supported && dummy_pstate_supported) {
+       if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
                context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
                goto restore_dml_state;
        }
index f5fe540..27cf227 100644 (file)
@@ -810,6 +810,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                break;
        case AMD_DPM_FORCED_LEVEL_MANUAL:
                data->fine_grain_enabled = 1;
+               break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
        default:
                break;
index f2d46b7..232abbb 100644 (file)
@@ -314,9 +314,10 @@ int drm_master_open(struct drm_file *file_priv)
 void drm_master_release(struct drm_file *file_priv)
 {
        struct drm_device *dev = file_priv->minor->dev;
-       struct drm_master *master = file_priv->master;
+       struct drm_master *master;
 
        mutex_lock(&dev->master_mutex);
+       master = file_priv->master;
        if (file_priv->magic)
                idr_remove(&file_priv->master->magic_map, file_priv->magic);
 
index d273d1a..495a476 100644 (file)
@@ -118,17 +118,18 @@ int drm_getunique(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
 {
        struct drm_unique *u = data;
-       struct drm_master *master = file_priv->master;
+       struct drm_master *master;
 
-       mutex_lock(&master->dev->master_mutex);
+       mutex_lock(&dev->master_mutex);
+       master = file_priv->master;
        if (u->unique_len >= master->unique_len) {
                if (copy_to_user(u->unique, master->unique, master->unique_len)) {
-                       mutex_unlock(&master->dev->master_mutex);
+                       mutex_unlock(&dev->master_mutex);
                        return -EFAULT;
                }
        }
        u->unique_len = master->unique_len;
-       mutex_unlock(&master->dev->master_mutex);
+       mutex_unlock(&dev->master_mutex);
 
        return 0;
 }
index 93f4d05..1e1cb24 100644 (file)
@@ -20,7 +20,6 @@ config DRM_I915
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        select ACPI_BUTTON if ACPI
-       select IO_MAPPING
        select SYNC_FILE
        select IOSF_MBI
        select CRC32
index f6fe5cb..8598a1c 100644 (file)
@@ -367,10 +367,11 @@ retry:
                goto err_unpin;
 
        /* Finally, remap it using the new GTT offset */
-       ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
-                       (vma->ggtt_view.partial.offset << PAGE_SHIFT),
-                       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
-                       min_t(u64, vma->size, area->vm_end - area->vm_start));
+       ret = remap_io_mapping(area,
+                              area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+                              (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+                              min_t(u64, vma->size, area->vm_end - area->vm_start),
+                              &ggtt->iomap);
        if (ret)
                goto err_fence;
 
index 9ec9277..69e43bf 100644 (file)
@@ -1905,6 +1905,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
 
 /* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap);
 int remap_io_sg(struct vm_area_struct *vma,
                unsigned long addr, unsigned long size,
                struct scatterlist *sgl, resource_size_t iobase);
index 9a777b0..666808c 100644 (file)
@@ -37,6 +37,17 @@ struct remap_pfn {
        resource_size_t iobase;
 };
 
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
+{
+       struct remap_pfn *r = data;
+
+       /* Special PTE are not associated with any struct page */
+       set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+       r->pfn++;
+
+       return 0;
+}
+
 #define use_dma(io) ((io) != -1)
 
 static inline unsigned long sgt_pfn(const struct remap_pfn *r)
@@ -66,7 +77,40 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
        return 0;
 }
 
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ *  Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap)
+{
+       struct remap_pfn r;
+       int err;
+
 #define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+       GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+       /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+       r.mm = vma->vm_mm;
+       r.pfn = pfn;
+       r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+                         (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+       err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+       if (unlikely(err)) {
+               zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+               return err;
+       }
+
+       return 0;
+}
 
 /**
  * remap_io_sg - remap an IO mapping to userspace
index ee8e753..eae0abd 100644 (file)
@@ -1592,8 +1592,8 @@ static int live_breadcrumbs_smoketest(void *arg)
 
        for (n = 0; n < smoke[0].ncontexts; n++) {
                smoke[0].contexts[n] = live_context(i915, file);
-               if (!smoke[0].contexts[n]) {
-                       ret = -ENOMEM;
+               if (IS_ERR(smoke[0].contexts[n])) {
+                       ret = PTR_ERR(smoke[0].contexts[n]);
                        goto out_contexts;
                }
        }
index b3fd350..5275b27 100644 (file)
@@ -577,7 +577,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
         * porches and sync.
         */
        /* (ps/s) / (pixels/s) = ps/pixels */
-       pclk = DIV_ROUND_UP_ULL(1000000000000, mode->clock);
+       pclk = DIV_ROUND_UP_ULL(1000000000000, (mode->clock * 1000));
        dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
                pclk);
 
index b4d8e1b..f6c1b62 100644 (file)
@@ -157,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
         * GPU registers so we need to add 0x1a800 to the register value on A630
         * to get the right value from PM4.
         */
-       get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
                rbmemptr_stats(ring, index, alwayson_start));
 
        /* Invalidate CCU depth and color */
@@ -187,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                rbmemptr_stats(ring, index, cpcycles_end));
-       get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
                rbmemptr_stats(ring, index, alwayson_end));
 
        /* Write the fence to the scratch register */
@@ -206,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        OUT_RING(ring, submit->seqno);
 
        trace_msm_gpu_submit_flush(submit,
-               gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
-                       REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+               gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+                       REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
 
        a6xx_flush(gpu, ring);
 }
@@ -462,6 +462,113 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
        gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
 }
 
+/* For a615, a616, a618, A619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+       A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+       A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+       A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+       A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+       A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+       A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+       A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+       A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+       A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+       A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+       A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+       A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+       A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+       A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+       A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+       A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+       A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+       A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+       A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+       A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+       A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+       A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+       A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+       A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+       A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+       A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+       A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+       A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+       A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+       A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+       A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+       A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+       A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+       A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+       A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+       A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+       A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+       A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+       A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+       A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+       A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+       A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+       A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+       A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+       A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+       A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+       A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       const u32 *regs = a6xx_protect;
+       unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
+
+       BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+       BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+
+       if (adreno_is_a650(adreno_gpu)) {
+               regs = a650_protect;
+               count = ARRAY_SIZE(a650_protect);
+               count_max = 48;
+       }
+
+       /*
+        * Enable access protection to privileged registers, fault on an access
+        * protect violation and select the last span to protect from the start
+        * address all the way to the end of the register address space
+        */
+       gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+       for (i = 0; i < count - 1; i++)
+               gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+       /* last CP_PROTECT to have "infinite" length on the last entry */
+       gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
+
 static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -489,7 +596,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
                rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
        gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
        gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
-               uavflagprd_inv >> 4 | lower_bit << 1);
+               uavflagprd_inv << 4 | lower_bit << 1);
        gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
 }
 
@@ -776,41 +883,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        }
 
        /* Protect registers from the CP */
-       gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
-
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
-               A6XX_PROTECT_RDONLY(0x600, 0x51));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
-               A6XX_PROTECT_RDONLY(0xfc00, 0x3));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
-               A6XX_PROTECT_RDONLY(0x0, 0x4f9));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
-               A6XX_PROTECT_RDONLY(0x501, 0xa));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
-               A6XX_PROTECT_RDONLY(0x511, 0x44));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
-               A6XX_PROTECT_RW(0xbe20, 0x11f3));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
-                       A6XX_PROTECT_RDONLY(0x980, 0x4));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+       a6xx_set_cp_protect(gpu);
 
        /* Enable expanded apriv for targets that support it */
        if (gpu->hw_apriv) {
@@ -1211,7 +1284,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
        if (ret)
                return ret;
 
-       if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+       if (a6xx_gpu->shadow_bo)
                for (i = 0; i < gpu->nr_rings; i++)
                        a6xx_gpu->shadow[i] = 0;
 
index ce0610c..bb544df 100644 (file)
@@ -44,7 +44,7 @@ struct a6xx_gpu {
  * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
  * registers starting at _reg.
  */
-#define A6XX_PROTECT_RW(_reg, _len) \
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
        ((1 << 31) | \
        (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
 
index 34bc935..6577788 100644 (file)
@@ -432,6 +432,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
        pll_freq += div_u64(tmp64, multiplier);
 
        vco_rate = pll_freq;
+       pll_10nm->vco_current_rate = vco_rate;
 
        DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
            pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
index e76ce40..6f96fba 100644 (file)
@@ -460,6 +460,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
        pll_freq += div_u64(tmp64, multiplier);
 
        vco_rate = pll_freq;
+       pll_7nm->vco_current_rate = vco_rate;
 
        DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
            pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
index 56df86e..369d91e 100644 (file)
@@ -1241,6 +1241,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 
                to_msm_bo(obj)->vram_node = &vma->node;
 
+               /* Call chain get_pages() -> update_inactive() tries to
+                * access msm_obj->mm_list, but it is not initialized yet.
+                * To avoid NULL pointer dereference error, initialize
+                * mm_list to be empty.
+                */
+               INIT_LIST_HEAD(&msm_obj->mm_list);
+
                msm_gem_lock(obj);
                pages = get_pages(obj);
                msm_gem_unlock(obj);
index dfa9fdb..06bb24d 100644 (file)
@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
        if (rdev->uvd.vcpu_bo == NULL)
                return -EINVAL;
 
-       memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+       memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
 
        size = radeon_bo_size(rdev->uvd.vcpu_bo);
        size -= rdev->uvd_fw->size;
@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
        ptr = rdev->uvd.cpu_addr;
        ptr += rdev->uvd_fw->size;
 
-       memset(ptr, 0, size);
+       memset_io((void __iomem *)ptr, 0, size);
 
        return 0;
 }
index bbdfd5e..f75fb15 100644 (file)
@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
                goto err_disable_clk_tmds;
        }
 
-       ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
+       ret = sun8i_hdmi_phy_get(hdmi, phy_node);
        of_node_put(phy_node);
        if (ret) {
                dev_err(dev, "Couldn't get the HDMI PHY\n");
@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
 
 cleanup_encoder:
        drm_encoder_cleanup(encoder);
-       sun8i_hdmi_phy_remove(hdmi);
 err_disable_clk_tmds:
        clk_disable_unprepare(hdmi->clk_tmds);
 err_assert_ctrl_reset:
@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
        struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
 
        dw_hdmi_unbind(hdmi->hdmi);
-       sun8i_hdmi_phy_remove(hdmi);
        clk_disable_unprepare(hdmi->clk_tmds);
        reset_control_assert(hdmi->rst_ctrl);
        gpiod_set_value(hdmi->ddc_en, 0);
@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
                .of_match_table = sun8i_dw_hdmi_dt_ids,
        },
 };
-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
+
+static int __init sun8i_dw_hdmi_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
+       if (ret)
+               return ret;
+
+       ret = platform_driver_register(&sun8i_hdmi_phy_driver);
+       if (ret) {
+               platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+               return ret;
+       }
+
+       return ret;
+}
+
+static void __exit sun8i_dw_hdmi_exit(void)
+{
+       platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+       platform_driver_unregister(&sun8i_hdmi_phy_driver);
+}
+
+module_init(sun8i_dw_hdmi_init);
+module_exit(sun8i_dw_hdmi_exit);
 
 MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
 MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
index d4b55af..74f6ed0 100644 (file)
@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
        struct gpio_desc                *ddc_en;
 };
 
+extern struct platform_driver sun8i_hdmi_phy_driver;
+
 static inline struct sun8i_dw_hdmi *
 encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
 {
        return container_of(encoder, struct sun8i_dw_hdmi, encoder);
 }
 
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
 
 void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
 void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
index 9994edf..c923970 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/delay.h>
 #include <linux/of_address.h>
+#include <linux/of_platform.h>
 
 #include "sun8i_dw_hdmi.h"
 
@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
        { /* sentinel */ }
 };
 
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+{
+       struct platform_device *pdev = of_find_device_by_node(node);
+       struct sun8i_hdmi_phy *phy;
+
+       if (!pdev)
+               return -EPROBE_DEFER;
+
+       phy = platform_get_drvdata(pdev);
+       if (!phy)
+               return -EPROBE_DEFER;
+
+       hdmi->phy = phy;
+
+       put_device(&pdev->dev);
+
+       return 0;
+}
+
+static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match;
-       struct device *dev = hdmi->dev;
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
        struct sun8i_hdmi_phy *phy;
        struct resource res;
        void __iomem *regs;
@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
                clk_prepare_enable(phy->clk_phy);
        }
 
-       hdmi->phy = phy;
+       platform_set_drvdata(pdev, phy);
 
        return 0;
 
@@ -728,9 +749,9 @@ err_put_clk_bus:
        return ret;
 }
 
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
 {
-       struct sun8i_hdmi_phy *phy = hdmi->phy;
+       struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
 
        clk_disable_unprepare(phy->clk_mod);
        clk_disable_unprepare(phy->clk_bus);
@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
        clk_put(phy->clk_pll1);
        clk_put(phy->clk_mod);
        clk_put(phy->clk_bus);
+       return 0;
 }
+
+struct platform_driver sun8i_hdmi_phy_driver = {
+       .probe  = sun8i_hdmi_phy_probe,
+       .remove = sun8i_hdmi_phy_remove,
+       .driver = {
+               .name = "sun8i-hdmi-phy",
+               .of_match_table = sun8i_hdmi_phy_of_table,
+       },
+};
index 87df251..0cb8680 100644 (file)
@@ -25,7 +25,7 @@
 #include "trace.h"
 
 /* XXX move to include/uapi/drm/drm_fourcc.h? */
-#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
 
 struct reset_control;
 
index 79bff8b..bfae8a0 100644 (file)
@@ -510,7 +510,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
         * dGPU sector layout.
         */
        if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
-               base |= BIT(39);
+               base |= BIT_ULL(39);
 #endif
 
        tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
index 7b88261..0ea320c 100644 (file)
@@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client)
                if (err < 0) {
                        dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
                                err);
-                       return err;
+                       goto rpm_put;
                }
 
                err = reset_control_assert(sor->rst);
                if (err < 0) {
                        dev_err(sor->dev, "failed to assert SOR reset: %d\n",
                                err);
-                       return err;
+                       goto rpm_put;
                }
        }
 
        err = clk_prepare_enable(sor->clk);
        if (err < 0) {
                dev_err(sor->dev, "failed to enable clock: %d\n", err);
-               return err;
+               goto rpm_put;
        }
 
        usleep_range(1000, 3000);
@@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client)
                        dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
                                err);
                        clk_disable_unprepare(sor->clk);
-                       return err;
+                       goto rpm_put;
                }
 
                reset_control_release(sor->rst);
@@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client)
        }
 
        return 0;
+
+rpm_put:
+       if (sor->rst)
+               pm_runtime_put(sor->dev);
+
+       return err;
 }
 
 static int tegra_sor_exit(struct host1x_client *client)
@@ -3739,12 +3745,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
                if (!sor->aux)
                        return -EPROBE_DEFER;
 
-               if (get_device(&sor->aux->ddc.dev)) {
-                       if (try_module_get(sor->aux->ddc.owner))
-                               sor->output.ddc = &sor->aux->ddc;
-                       else
-                               put_device(&sor->aux->ddc.dev);
-               }
+               if (get_device(sor->aux->dev))
+                       sor->output.ddc = &sor->aux->ddc;
        }
 
        if (!sor->aux) {
@@ -3772,12 +3774,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 
        err = tegra_sor_parse_dt(sor);
        if (err < 0)
-               return err;
+               goto put_aux;
 
        err = tegra_output_probe(&sor->output);
-       if (err < 0)
-               return dev_err_probe(&pdev->dev, err,
-                                    "failed to probe output\n");
+       if (err < 0) {
+               dev_err_probe(&pdev->dev, err, "failed to probe output\n");
+               goto put_aux;
+       }
 
        if (sor->ops && sor->ops->probe) {
                err = sor->ops->probe(sor);
@@ -3916,17 +3919,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, sor);
        pm_runtime_enable(&pdev->dev);
 
-       INIT_LIST_HEAD(&sor->client.list);
+       host1x_client_init(&sor->client);
        sor->client.ops = &sor_client_ops;
        sor->client.dev = &pdev->dev;
 
-       err = host1x_client_register(&sor->client);
-       if (err < 0) {
-               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
-                       err);
-               goto rpm_disable;
-       }
-
        /*
         * On Tegra210 and earlier, provide our own implementation for the
         * pad output clock.
@@ -3938,13 +3934,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
                                      sor->index);
                if (!name) {
                        err = -ENOMEM;
-                       goto unregister;
+                       goto uninit;
                }
 
                err = host1x_client_resume(&sor->client);
                if (err < 0) {
                        dev_err(sor->dev, "failed to resume: %d\n", err);
-                       goto unregister;
+                       goto uninit;
                }
 
                sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3955,17 +3951,30 @@ static int tegra_sor_probe(struct platform_device *pdev)
                err = PTR_ERR(sor->clk_pad);
                dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
                        err);
-               goto unregister;
+               goto uninit;
+       }
+
+       err = __host1x_client_register(&sor->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+                       err);
+               goto uninit;
        }
 
        return 0;
 
-unregister:
-       host1x_client_unregister(&sor->client);
-rpm_disable:
+uninit:
+       host1x_client_exit(&sor->client);
        pm_runtime_disable(&pdev->dev);
 remove:
+       if (sor->aux)
+               sor->output.ddc = NULL;
+
        tegra_output_remove(&sor->output);
+put_aux:
+       if (sor->aux)
+               put_device(sor->aux->dev);
+
        return err;
 }
 
@@ -3983,6 +3992,11 @@ static int tegra_sor_remove(struct platform_device *pdev)
 
        pm_runtime_disable(&pdev->dev);
 
+       if (sor->aux) {
+               put_device(sor->aux->dev);
+               sor->output.ddc = NULL;
+       }
+
        tegra_output_remove(&sor->output);
 
        return 0;
index cfd0b92..ebcffe7 100644 (file)
@@ -1172,7 +1172,10 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
                return -EBUSY;
 
-       if (!ttm_bo_get_unless_zero(bo)) {
+       if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
+           bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
+           bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+           !ttm_bo_get_unless_zero(bo)) {
                if (locked)
                        dma_resv_unlock(bo->base.resv);
                return -EBUSY;
index a1dcf7d..3d9c62b 100644 (file)
@@ -143,14 +143,8 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 
                for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
                        list_for_each_entry(bo, &man->lru[j], lru) {
-                               uint32_t num_pages;
+                               uint32_t num_pages = PFN_UP(bo->base.size);
 
-                               if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
-                                   bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
-                                   bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
-                                       continue;
-
-                               num_pages = bo->ttm->num_pages;
                                ret = ttm_bo_swapout(bo, ctx, gfp_flags);
                                /* ttm_bo_swapout has dropped the lru_lock */
                                if (!ret)
index bb5529a..948b3a5 100644 (file)
@@ -372,7 +372,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
                if (!old_hvs_state->fifo_state[channel].in_use)
                        continue;
 
-               ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[i].pending_commit);
+               ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
                if (ret)
                        drm_err(dev, "Timed out waiting for commit\n");
        }
index 46f69c5..218e371 100644 (file)
@@ -735,6 +735,29 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 }
 EXPORT_SYMBOL(host1x_driver_unregister);
 
+/**
+ * __host1x_client_init() - initialize a host1x client
+ * @client: host1x client
+ * @key: lock class key for the client-specific mutex
+ */
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
+{
+       INIT_LIST_HEAD(&client->list);
+       __mutex_init(&client->lock, "host1x client lock", key);
+       client->usecount = 0;
+}
+EXPORT_SYMBOL(__host1x_client_init);
+
+/**
+ * host1x_client_exit() - uninitialize a host1x client
+ * @client: host1x client
+ */
+void host1x_client_exit(struct host1x_client *client)
+{
+       mutex_destroy(&client->lock);
+}
+EXPORT_SYMBOL(host1x_client_exit);
+
 /**
  * __host1x_client_register() - register a host1x client
  * @client: host1x client
@@ -747,16 +770,11 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int __host1x_client_register(struct host1x_client *client,
-                            struct lock_class_key *key)
+int __host1x_client_register(struct host1x_client *client)
 {
        struct host1x *host1x;
        int err;
 
-       INIT_LIST_HEAD(&client->list);
-       __mutex_init(&client->lock, "host1x client lock", key);
-       client->usecount = 0;
-
        mutex_lock(&devices_lock);
 
        list_for_each_entry(host1x, &devices, list) {
index 4bf263c..1605549 100644 (file)
@@ -93,11 +93,11 @@ menu "Special HID drivers"
        depends on HID
 
 config HID_A4TECH
-       tristate "A4 tech mice"
+       tristate "A4TECH mice"
        depends on HID
        default !EXPERT
        help
-       Support for A4 tech X5 and WOP-35 / Trust 450L mice.
+       Support for some A4TECH mice with two scroll wheels.
 
 config HID_ACCUTOUCH
        tristate "Accutouch touch device"
@@ -922,6 +922,21 @@ config HID_SAMSUNG
        help
        Support for Samsung InfraRed remote control or keyboards.
 
+config HID_SEMITEK
+       tristate "Semitek USB keyboards"
+       depends on HID
+       help
+       Support for Semitek USB keyboards that are not fully compliant
+       with the HID standard.
+
+       There are many variants, including:
+       - GK61, GK64, GK68, GK84, GK96, etc.
+       - SK61, SK64, SK68, SK84, SK96, etc.
+       - Dierya DK61/DK66
+       - Tronsmart TK09R
+       - Woo-dy
+       - X-Bows Nature/Knight
+
 config HID_SONY
        tristate "Sony PS2/3/4 accessories"
        depends on USB_HID
index 193431e..1ea1a7c 100644 (file)
@@ -106,6 +106,7 @@ obj-$(CONFIG_HID_ROCCAT)    += hid-roccat.o hid-roccat-common.o \
 obj-$(CONFIG_HID_RMI)          += hid-rmi.o
 obj-$(CONFIG_HID_SAITEK)       += hid-saitek.o
 obj-$(CONFIG_HID_SAMSUNG)      += hid-samsung.o
+obj-$(CONFIG_HID_SEMITEK)      += hid-semitek.o
 obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
 obj-$(CONFIG_HID_SONY)         += hid-sony.o
 obj-$(CONFIG_HID_SPEEDLINK)    += hid-speedlink.o
index 2ab38b7..3589d99 100644 (file)
@@ -88,6 +88,7 @@ static void amd_sfh_work(struct work_struct *work)
        sensor_index = req_node->sensor_idx;
        report_id = req_node->report_id;
        node_type = req_node->report_type;
+       kfree(req_node);
 
        if (node_type == HID_FEATURE_REPORT) {
                report_size = get_feature_report(sensor_index, report_id,
@@ -142,7 +143,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
        int rc, i;
 
        dev = &privdata->pdev->dev;
-       cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
+       cl_data = devm_kzalloc(dev, sizeof(*cl_data), GFP_KERNEL);
        if (!cl_data)
                return -ENOMEM;
 
@@ -175,12 +176,12 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                        rc = -EINVAL;
                        goto cleanup;
                }
-               cl_data->feature_report[i] = kzalloc(feature_report_size, GFP_KERNEL);
+               cl_data->feature_report[i] = devm_kzalloc(dev, feature_report_size, GFP_KERNEL);
                if (!cl_data->feature_report[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
                }
-               cl_data->input_report[i] = kzalloc(input_report_size, GFP_KERNEL);
+               cl_data->input_report[i] = devm_kzalloc(dev, input_report_size, GFP_KERNEL);
                if (!cl_data->input_report[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
@@ -189,7 +190,8 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                info.sensor_idx = cl_idx;
                info.dma_address = cl_data->sensor_dma_addr[i];
 
-               cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
+               cl_data->report_descr[i] =
+                       devm_kzalloc(dev, cl_data->report_descr_sz[i], GFP_KERNEL);
                if (!cl_data->report_descr[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
@@ -214,11 +216,11 @@ cleanup:
                                          cl_data->sensor_virt_addr[i],
                                          cl_data->sensor_dma_addr[i]);
                }
-               kfree(cl_data->feature_report[i]);
-               kfree(cl_data->input_report[i]);
-               kfree(cl_data->report_descr[i]);
+               devm_kfree(dev, cl_data->feature_report[i]);
+               devm_kfree(dev, cl_data->input_report[i]);
+               devm_kfree(dev, cl_data->report_descr[i]);
        }
-       kfree(cl_data);
+       devm_kfree(dev, cl_data);
        return rc;
 }
 
@@ -241,6 +243,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
                                          cl_data->sensor_dma_addr[i]);
                }
        }
-       kfree(cl_data);
        return 0;
 }
index 4f98948..5ad1e7a 100644 (file)
@@ -162,9 +162,6 @@ void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
        int i;
 
        for (i = 0; i < cli_data->num_hid_devices; ++i) {
-               kfree(cli_data->feature_report[i]);
-               kfree(cli_data->input_report[i]);
-               kfree(cli_data->report_descr[i]);
                if (cli_data->hid_sensor_hubs[i]) {
                        kfree(cli_data->hid_sensor_hubs[i]->driver_data);
                        hid_destroy_device(cli_data->hid_sensor_hubs[i]);
index 3a8c4a5..2cbc32d 100644 (file)
@@ -147,6 +147,8 @@ static const struct hid_device_id a4_devices[] = {
                .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
                .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95),
+               .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
        { }
 };
 MODULE_DEVICE_TABLE(hid, a4_devices);
index 2ab22b9..fca8fc7 100644 (file)
@@ -79,10 +79,9 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_T100_KEYBOARD            BIT(6)
 #define QUIRK_T100CHI                  BIT(7)
 #define QUIRK_G752_KEYBOARD            BIT(8)
-#define QUIRK_T101HA_DOCK              BIT(9)
-#define QUIRK_T90CHI                   BIT(10)
-#define QUIRK_MEDION_E1239T            BIT(11)
-#define QUIRK_ROG_NKEY_KEYBOARD                BIT(12)
+#define QUIRK_T90CHI                   BIT(9)
+#define QUIRK_MEDION_E1239T            BIT(10)
+#define QUIRK_ROG_NKEY_KEYBOARD                BIT(11)
 
 #define I2C_KEYBOARD_QUIRKS                    (QUIRK_FIX_NOTEBOOK_REPORT | \
                                                 QUIRK_NO_INIT_REPORTS | \
@@ -335,7 +334,7 @@ static int asus_raw_event(struct hid_device *hdev,
        if (drvdata->quirks & QUIRK_MEDION_E1239T)
                return asus_e1239t_event(drvdata, data, size);
 
-       if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+       if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
                /*
                 * Skip these report ID, the device emits a continuous stream associated
                 * with the AURA mode it is in which looks like an 'echo'.
@@ -355,6 +354,16 @@ static int asus_raw_event(struct hid_device *hdev,
                                return -1;
                        }
                }
+               if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+                       /*
+                        * G713 and G733 send these codes on some keypresses; depending on
+                        * the key pressed, it can trigger a shutdown event if not caught.
+                        */
+                       if(data[0] == 0x02 && data[1] == 0x30) {
+                               return -1;
+                       }
+               }
+
        }
 
        return 0;
@@ -1072,11 +1081,6 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return ret;
        }
 
-       /* use hid-multitouch for T101HA touchpad */
-       if (id->driver_data & QUIRK_T101HA_DOCK &&
-           hdev->collection->usage == HID_GD_MOUSE)
-               return -ENODEV;
-
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret) {
                hid_err(hdev, "Asus hw start failed: %d\n", ret);
@@ -1230,8 +1234,6 @@ static const struct hid_device_id asus_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
          QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
-       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
-               USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD), QUIRK_T101HA_DOCK },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
        { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
@@ -1239,6 +1241,12 @@ static const struct hid_device_id asus_devices[] = {
                USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
        { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T),
                QUIRK_MEDION_E1239T },
+       /*
+        * Note: bind to the HID_GROUP_GENERIC group so that we only bind to the keyboard
+        * part, while letting hid-multitouch.c handle the touchpad.
+        */
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+               USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
index 0ae9f6d..0de2788 100644 (file)
@@ -2005,6 +2005,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
        case BUS_I2C:
                bus = "I2C";
                break;
+       case BUS_VIRTUAL:
+               bus = "VIRTUAL";
+               break;
        default:
                bus = "<UNKNOWN>";
        }
@@ -2588,7 +2591,6 @@ int hid_check_keys_pressed(struct hid_device *hid)
 
        return 0;
 }
-
 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
 
 static int __init hid_init(void)
index 59f8d71..a311fb8 100644 (file)
@@ -930,6 +930,9 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_APPSELECT] = "AppSelect",
        [KEY_SCREENSAVER] = "ScreenSaver",
        [KEY_VOICECOMMAND] = "VoiceCommand",
+       [KEY_ASSISTANT] = "Assistant",
+       [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
+       [KEY_EMOJI_PICKER] = "EmojiPicker",
        [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
        [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
        [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
index a575160..f43a840 100644 (file)
@@ -201,7 +201,7 @@ struct ft260_i2c_write_request_report {
        u8 address;             /* 7-bit I2C address */
        u8 flag;                /* I2C transaction condition */
        u8 length;              /* data payload length */
-       u8 data[60];            /* data payload */
+       u8 data[FT260_WR_DATA_MAX]; /* data payload */
 } __packed;
 
 struct ft260_i2c_read_request_report {
@@ -249,7 +249,10 @@ static int ft260_hid_feature_report_get(struct hid_device *hdev,
 
        ret = hid_hw_raw_request(hdev, report_id, buf, len, HID_FEATURE_REPORT,
                                 HID_REQ_GET_REPORT);
-       memcpy(data, buf, len);
+       if (likely(ret == len))
+               memcpy(data, buf, len);
+       else if (ret >= 0)
+               ret = -EIO;
        kfree(buf);
        return ret;
 }
@@ -298,7 +301,7 @@ static int ft260_xfer_status(struct ft260_device *dev)
 
        ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS,
                                           (u8 *)&report, sizeof(report));
-       if (ret < 0) {
+       if (unlikely(ret < 0)) {
                hid_err(hdev, "failed to retrieve status: %d\n", ret);
                return ret;
        }
@@ -429,6 +432,9 @@ static int ft260_smbus_write(struct ft260_device *dev, u8 addr, u8 cmd,
        struct ft260_i2c_write_request_report *rep =
                (struct ft260_i2c_write_request_report *)dev->write_buf;
 
+       if (data_len >= sizeof(rep->data))
+               return -EINVAL;
+
        rep->address = addr;
        rep->data[0] = cmd;
        rep->length = data_len + 1;
@@ -721,10 +727,9 @@ static int ft260_get_system_config(struct hid_device *hdev,
 
        ret = ft260_hid_feature_report_get(hdev, FT260_SYSTEM_SETTINGS,
                                           (u8 *)cfg, len);
-       if (ret != len) {
+       if (ret < 0) {
                hid_err(hdev, "failed to retrieve system status\n");
-               if (ret >= 0)
-                       return -EIO;
+               return ret;
        }
        return 0;
 }
@@ -777,8 +782,8 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        int ret;
 
        ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
-       if (ret != len && ret >= 0)
-               return -EIO;
+       if (ret < 0)
+               return ret;
 
        return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
 }
@@ -789,8 +794,8 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        int ret;
 
        ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
-       if (ret != len && ret >= 0)
-               return -EIO;
+       if (ret < 0)
+               return ret;
 
        return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
 }
@@ -941,10 +946,8 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        ret = ft260_hid_feature_report_get(hdev, FT260_CHIP_VERSION,
                                           (u8 *)&version, sizeof(version));
-       if (ret != sizeof(version)) {
+       if (ret < 0) {
                hid_err(hdev, "failed to retrieve chip version\n");
-               if (ret >= 0)
-                       ret = -EIO;
                goto err_hid_close;
        }
 
index 898871c..29ccb0a 100644 (file)
@@ -54,6 +54,7 @@ static const struct hid_device_id gt683r_led_id[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
        { }
 };
+MODULE_DEVICE_TABLE(hid, gt683r_led_id);
 
 static void gt683r_brightness_set(struct led_classdev *led_cdev,
                                enum led_brightness brightness)
index 84b8da3..b84a0a1 100644 (file)
@@ -26,6 +26,7 @@
 #define USB_DEVICE_ID_A4TECH_WCP32PU   0x0006
 #define USB_DEVICE_ID_A4TECH_X5_005D   0x000a
 #define USB_DEVICE_ID_A4TECH_RP_649    0x001a
+#define USB_DEVICE_ID_A4TECH_NB_95     0x022b
 
 #define USB_VENDOR_ID_AASHIMA          0x06d6
 #define USB_DEVICE_ID_AASHIMA_GAMEPAD  0x0025
 
 #define USB_VENDOR_ID_CORSAIR          0x1b1c
 #define USB_DEVICE_ID_CORSAIR_K90      0x1b02
-
-#define USB_VENDOR_ID_CORSAIR           0x1b1c
 #define USB_DEVICE_ID_CORSAIR_K70R      0x1b09
 #define USB_DEVICE_ID_CORSAIR_K95RGB    0x1b11
 #define USB_DEVICE_ID_CORSAIR_M65RGB    0x1b12
 #define USB_DEVICE_ID_LENOVO_X1_COVER  0x6085
 #define USB_DEVICE_ID_LENOVO_X1_TAB    0x60a3
 #define USB_DEVICE_ID_LENOVO_X1_TAB3   0x60b5
+#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E    0x600e
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D     0x608d
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019     0x6019
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E     0x602e
 #define USB_DEVICE_ID_SAITEK_X52       0x075c
 #define USB_DEVICE_ID_SAITEK_X52_2     0x0255
 #define USB_DEVICE_ID_SAITEK_X52_PRO   0x0762
+#define USB_DEVICE_ID_SAITEK_X65       0x0b6a
 
 #define USB_VENDOR_ID_SAMSUNG          0x0419
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE        0x0001
 #define USB_DEVICE_ID_SEMICO_USB_KEYKOARD      0x0023
 #define USB_DEVICE_ID_SEMICO_USB_KEYKOARD2     0x0027
 
+#define USB_VENDOR_ID_SEMITEK  0x1ea7
+#define USB_DEVICE_ID_SEMITEK_KEYBOARD 0x0907
+
 #define USB_VENDOR_ID_SENNHEISER       0x1395
 #define USB_DEVICE_ID_SENNHEISER_BTD500USB     0x002c
 
 #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A      0x2819
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012       0x2968
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+#define USB_DEVICE_ID_SYNAPTICS_DELL_K15A      0x6e21
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5   0x81a7
index 18f5e28..abbfa91 100644 (file)
@@ -964,6 +964,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 
                case 0x0cd: map_key_clear(KEY_PLAYPAUSE);       break;
                case 0x0cf: map_key_clear(KEY_VOICECOMMAND);    break;
+
+               case 0x0d9: map_key_clear(KEY_EMOJI_PICKER);    break;
+
                case 0x0e0: map_abs_clear(ABS_VOLUME);          break;
                case 0x0e2: map_key_clear(KEY_MUTE);            break;
                case 0x0e5: map_key_clear(KEY_BASSBOOST);       break;
index d598094..fee4e54 100644 (file)
@@ -1263,6 +1263,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
        int status;
 
        long flags = (long) data[2];
+       *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
 
        if (flags & 0x80)
                switch (flags & 0x07) {
index 2bb473d..8bcaee4 100644 (file)
@@ -693,7 +693,7 @@ static int magicmouse_probe(struct hid_device *hdev,
        if (id->vendor == USB_VENDOR_ID_APPLE &&
            id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
            hdev->type != HID_TYPE_USBMOUSE)
-               return 0;
+               return -ENODEV;
 
        msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
        if (msc == NULL) {
@@ -779,7 +779,10 @@ err_stop_hw:
 static void magicmouse_remove(struct hid_device *hdev)
 {
        struct magicmouse_sc *msc = hid_get_drvdata(hdev);
-       cancel_delayed_work_sync(&msc->work);
+
+       if (msc)
+               cancel_delayed_work_sync(&msc->work);
+
        hid_hw_stop(hdev);
 }
 
index 9d9f3e1..2e4fb76 100644 (file)
@@ -70,6 +70,7 @@ MODULE_LICENSE("GPL");
 #define MT_QUIRK_WIN8_PTP_BUTTONS      BIT(18)
 #define MT_QUIRK_SEPARATE_APP_REPORT   BIT(19)
 #define MT_QUIRK_FORCE_MULTI_INPUT     BIT(20)
+#define MT_QUIRK_DISABLE_WAKEUP                BIT(21)
 
 #define MT_INPUTMODE_TOUCHSCREEN       0x02
 #define MT_INPUTMODE_TOUCHPAD          0x03
@@ -191,6 +192,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
 #define MT_CLS_EXPORT_ALL_INPUTS               0x0013
 /* reserved                                    0x0014 */
 #define MT_CLS_WIN_8_FORCE_MULTI_INPUT         0x0015
+#define MT_CLS_WIN_8_DISABLE_WAKEUP            0x0016
 
 /* vendor specific classes */
 #define MT_CLS_3M                              0x0101
@@ -283,6 +285,15 @@ static const struct mt_class mt_classes[] = {
                        MT_QUIRK_WIN8_PTP_BUTTONS |
                        MT_QUIRK_FORCE_MULTI_INPUT,
                .export_all_inputs = true },
+       { .name = MT_CLS_WIN_8_DISABLE_WAKEUP,
+               .quirks = MT_QUIRK_ALWAYS_VALID |
+                       MT_QUIRK_IGNORE_DUPLICATES |
+                       MT_QUIRK_HOVERING |
+                       MT_QUIRK_CONTACT_CNT_ACCURATE |
+                       MT_QUIRK_STICKY_FINGERS |
+                       MT_QUIRK_WIN8_PTP_BUTTONS |
+                       MT_QUIRK_DISABLE_WAKEUP,
+               .export_all_inputs = true },
 
        /*
         * vendor specific classes
@@ -604,9 +615,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
                if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
                        continue;
 
-               for (n = 0; n < field->report_count; n++) {
-                       if (field->usage[n].hid == HID_DG_CONTACTID)
-                               rdata->is_mt_collection = true;
+               if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
+                       for (n = 0; n < field->report_count; n++) {
+                               if (field->usage[n].hid == HID_DG_CONTACTID) {
+                                       rdata->is_mt_collection = true;
+                                       break;
+                               }
+                       }
                }
        }
 
@@ -759,7 +774,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        return 1;
                case HID_DG_CONFIDENCE:
                        if ((cls->name == MT_CLS_WIN_8 ||
-                            cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
+                            cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT ||
+                            cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) &&
                                (field->application == HID_DG_TOUCHPAD ||
                                 field->application == HID_DG_TOUCHSCREEN))
                                app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -1576,13 +1592,13 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
                /* we do not set suffix = "Touchscreen" */
                hi->input->name = hdev->name;
                break;
-       case HID_DG_STYLUS:
-               /* force BTN_STYLUS to allow tablet matching in udev */
-               __set_bit(BTN_STYLUS, hi->input->keybit);
-               break;
        case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
                suffix = "Custom Media Keys";
                break;
+       case HID_DG_STYLUS:
+               /* force BTN_STYLUS to allow tablet matching in udev */
+               __set_bit(BTN_STYLUS, hi->input->keybit);
+               fallthrough;
        case HID_DG_PEN:
                suffix = "Stylus";
                break;
@@ -1749,8 +1765,14 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 #ifdef CONFIG_PM
 static int mt_suspend(struct hid_device *hdev, pm_message_t state)
 {
+       struct mt_device *td = hid_get_drvdata(hdev);
+
        /* High latency is desirable for power savings during S3/S0ix */
-       mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+       if (td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP)
+               mt_set_modes(hdev, HID_LATENCY_HIGH, false, false);
+       else
+               mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+
        return 0;
 }
 
@@ -1809,6 +1831,12 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
                        USB_DEVICE_ID_ANTON_TOUCH_PAD) },
 
+       /* Asus T101HA */
+       { .driver_data = MT_CLS_WIN_8_DISABLE_WAKEUP,
+               HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+                          USB_VENDOR_ID_ASUSTEK,
+                          USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
+
        /* Asus T304UA */
        { .driver_data = MT_CLS_ASUS,
                HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
index 3dd6f15..51b39bd 100644 (file)
@@ -110,6 +110,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL },
@@ -158,6 +159,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X65), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
@@ -176,6 +178,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K15A), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
@@ -211,6 +214,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95) },
 #endif
 #if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
diff --git a/drivers/hid/hid-semitek.c b/drivers/hid/hid-semitek.c
new file mode 100644 (file)
index 0000000..ba6607d
--- /dev/null
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *  HID driver for Semitek keyboards
+ *
+ *  Copyright (c) 2021 Benjamin Moody
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+                                  unsigned int *rsize)
+{
+       /* In the report descriptor for interface 2, fix the incorrect
+        * description of report ID 0x04 (the report contains a
+        * bitmask, not an array of keycodes). */
+       if (*rsize == 0xcb && rdesc[0x83] == 0x81 && rdesc[0x84] == 0x00) {
+               hid_info(hdev, "fixing up Semitek report descriptor\n");
+               rdesc[0x84] = 0x02;
+       }
+       return rdesc;
+}
+
+static const struct hid_device_id semitek_devices[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_SEMITEK, USB_DEVICE_ID_SEMITEK_KEYBOARD) },
+       { }
+};
+MODULE_DEVICE_TABLE(hid, semitek_devices);
+
+static struct hid_driver semitek_driver = {
+       .name = "semitek",
+       .id_table = semitek_devices,
+       .report_fixup = semitek_report_fixup,
+};
+module_hid_driver(semitek_driver);
+
+MODULE_LICENSE("GPL");
index 2e66621..32c2306 100644 (file)
@@ -387,7 +387,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
        struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
        int index, field_index, usage;
        char name[HID_CUSTOM_NAME_LENGTH];
-       int value;
+       int value, ret;
 
        if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
                   name) == 3) {
@@ -403,8 +403,10 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
 
                report_id = sensor_inst->fields[field_index].attribute.
                                                                report_id;
-               sensor_hub_set_feature(sensor_inst->hsdev, report_id,
-                                      index, sizeof(value), &value);
+               ret = sensor_hub_set_feature(sensor_inst->hsdev, report_id,
+                                            index, sizeof(value), &value);
+               if (ret)
+                       return ret;
        } else
                return -EINVAL;
 
index 95cf88f..6abd3e2 100644 (file)
@@ -209,16 +209,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        buffer_size = buffer_size / sizeof(__s32);
        if (buffer_size) {
                for (i = 0; i < buffer_size; ++i) {
-                       hid_set_field(report->field[field_index], i,
-                                     (__force __s32)cpu_to_le32(*buf32));
+                       ret = hid_set_field(report->field[field_index], i,
+                                           (__force __s32)cpu_to_le32(*buf32));
+                       if (ret)
+                               goto done_proc;
+
                        ++buf32;
                }
        }
        if (remaining_bytes) {
                value = 0;
                memcpy(&value, (u8 *)buf32, remaining_bytes);
-               hid_set_field(report->field[field_index], i,
-                             (__force __s32)cpu_to_le32(value));
+               ret = hid_set_field(report->field[field_index], i,
+                                   (__force __s32)cpu_to_le32(value));
+               if (ret)
+                       goto done_proc;
        }
        hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT);
        hid_hw_wait(hsdev->hdev);
index 2e452c6..f643b1c 100644 (file)
@@ -312,7 +312,7 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
        }
 
        tm_wheel->change_request = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
-       if (!tm_wheel->model_request) {
+       if (!tm_wheel->change_request) {
                ret = -ENOMEM;
                goto error5;
        }
index 9993133..4647461 100644 (file)
@@ -45,6 +45,7 @@
 #define I2C_HID_QUIRK_BOGUS_IRQ                        BIT(4)
 #define I2C_HID_QUIRK_RESET_ON_RESUME          BIT(5)
 #define I2C_HID_QUIRK_BAD_INPUT_SIZE           BIT(6)
+#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET    BIT(7)
 
 
 /* flags */
@@ -178,6 +179,11 @@ static const struct i2c_hid_quirks {
                 I2C_HID_QUIRK_RESET_ON_RESUME },
        { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
                I2C_HID_QUIRK_BAD_INPUT_SIZE },
+       /*
+        * Sending the wakeup after reset actually breaks the ELAN touchscreen controller
+        */
+       { USB_VENDOR_ID_ELAN, HID_ANY_ID,
+                I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
        { 0, 0 }
 };
 
@@ -461,7 +467,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
        }
 
        /* At least some SIS devices need this after reset */
-       ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
+               ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
 
 out_unlock:
        mutex_unlock(&ihid->reset_lock);
@@ -990,8 +997,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
        hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
        hid->product = le16_to_cpu(ihid->hdesc.wProductID);
 
-       snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
-                client->name, hid->vendor, hid->product);
+       snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+                client->name, (u16)hid->vendor, (u16)hid->product);
        strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
 
        ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
index 21b87e4..07e3cbc 100644 (file)
@@ -28,6 +28,8 @@
 #define EHL_Ax_DEVICE_ID       0x4BB3
 #define TGL_LP_DEVICE_ID       0xA0FC
 #define TGL_H_DEVICE_ID                0x43FC
+#define ADL_S_DEVICE_ID                0x7AF8
+#define ADL_P_DEVICE_ID                0x51FC
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 06081cf..a6d5173 100644 (file)
@@ -39,6 +39,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_S_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index 7b27ec3..5571e74 100644 (file)
@@ -168,9 +168,9 @@ int surface_hid_device_add(struct surface_hid_device *shid)
 
        shid->hid->dev.parent = shid->dev;
        shid->hid->bus = BUS_HOST;
-       shid->hid->vendor = cpu_to_le16(shid->attrs.vendor);
-       shid->hid->product = cpu_to_le16(shid->attrs.product);
-       shid->hid->version = cpu_to_le16(shid->hid_desc.hid_version);
+       shid->hid->vendor = get_unaligned_le16(&shid->attrs.vendor);
+       shid->hid->product = get_unaligned_le16(&shid->attrs.product);
+       shid->hid->version = get_unaligned_le16(&shid->hid_desc.hid_version);
        shid->hid->country = shid->hid_desc.country_code;
 
        snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X",
index 86257ce..4e90773 100644 (file)
@@ -374,7 +374,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
        raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
        dir = usbhid->ctrl[usbhid->ctrltail].dir;
 
-       len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       len = hid_report_len(report);
        if (dir == USB_DIR_OUT) {
                usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
                usbhid->urbctrl->transfer_buffer_length = len;
index ea126c5..3b4ee21 100644 (file)
@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
 
        if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
            pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
+               error = -EPERM;
                hid_notice(hid,
                           "device does not support device managed pool\n");
                goto fail;
index 02298b8..731d511 100644 (file)
@@ -771,6 +771,16 @@ static int corsairpsu_raw_event(struct hid_device *hdev, struct hid_report *repo
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int corsairpsu_resume(struct hid_device *hdev)
+{
+       struct corsairpsu_data *priv = hid_get_drvdata(hdev);
+
+       /* some PSUs turn off the microcontroller during standby, so a reinit is required */
+       return corsairpsu_init(priv);
+}
+#endif
+
 static const struct hid_device_id corsairpsu_idtable[] = {
        { HID_USB_DEVICE(0x1b1c, 0x1c03) }, /* Corsair HX550i */
        { HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
@@ -793,6 +803,10 @@ static struct hid_driver corsairpsu_driver = {
        .probe          = corsairpsu_probe,
        .remove         = corsairpsu_remove,
        .raw_event      = corsairpsu_raw_event,
+#ifdef CONFIG_PM
+       .resume         = corsairpsu_resume,
+       .reset_resume   = corsairpsu_resume,
+#endif
 };
 module_hid_driver(corsairpsu_driver);
 
index 2970892..f2221ca 100644 (file)
@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = {
 static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
                              int index)
 {
-       if (disallow_fan_support && index >= 8)
+       if (disallow_fan_support && index >= 20)
                return 0;
        if (disallow_fan_type_call &&
-           (index == 9 || index == 12 || index == 15))
+           (index == 21 || index == 25 || index == 28))
                return 0;
        if (index >= 0 && index <= 1 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
index e248424..aec294c 100644 (file)
@@ -37,6 +37,8 @@ struct fsp3y_data {
        struct pmbus_driver_info info;
        int chip;
        int page;
+
+       bool vout_linear_11;
 };
 
 #define to_fsp3y_data(x) container_of(x, struct fsp3y_data, info)
@@ -108,11 +110,9 @@ static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg)
        int rv;
 
        /*
-        * YH5151-E outputs vout in linear11. The conversion is done when
-        * reading. Here, we have to inject pmbus_core with the correct
-        * exponent (it is -6).
+        * Inject an exponent for non-compliant YH5151-E.
         */
-       if (data->chip == yh5151e && reg == PMBUS_VOUT_MODE)
+       if (data->vout_linear_11 && reg == PMBUS_VOUT_MODE)
                return 0x1A;
 
        rv = set_page(client, page);
@@ -161,10 +161,9 @@ static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase,
                return rv;
 
        /*
-        * YH-5151E is non-compliant and outputs output voltages in linear11
-        * instead of linear16.
+        * Handle YH-5151E non-compliant linear11 vout voltage.
         */
-       if (data->chip == yh5151e && reg == PMBUS_READ_VOUT)
+       if (data->vout_linear_11 && reg == PMBUS_READ_VOUT)
                rv = sign_extend32(rv, 10) & 0xffff;
 
        return rv;
@@ -256,6 +255,25 @@ static int fsp3y_probe(struct i2c_client *client)
 
        data->info = fsp3y_info[data->chip];
 
+       /*
+        * YH-5151E sometimes reports vout in linear11 and sometimes in
+        * linear16. This depends on the exact individual piece of hardware. One
+        * YH-5151E can use linear16 and another might use linear11 instead.
+        *
+        * The format can be recognized by reading VOUT_MODE - if it doesn't
+        * report a valid exponent, then vout uses linear11. Otherwise, the
+        * device is compliant and uses linear16.
+        */
+       data->vout_linear_11 = false;
+       if (data->chip == yh5151e) {
+               rv = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE);
+               if (rv < 0)
+                       return rv;
+
+               if (rv == 0xFF)
+                       data->vout_linear_11 = true;
+       }
+
        return pmbus_do_probe(client, &data->info);
 }
 
index 40597a9..1a8caff 100644 (file)
@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client)
                info->read_word_data = raa_dmpvr2_read_word_data;
                break;
        case raa_dmpvr2_2rail_nontc:
-               info->func[0] &= ~PMBUS_HAVE_TEMP;
-               info->func[1] &= ~PMBUS_HAVE_TEMP;
+               info->func[0] &= ~PMBUS_HAVE_TEMP3;
+               info->func[1] &= ~PMBUS_HAVE_TEMP3;
                fallthrough;
        case raa_dmpvr2_2rail:
                info->pages = 2;
index b6e8b20..fa298b4 100644 (file)
@@ -299,7 +299,7 @@ static int q54sj108a2_probe(struct i2c_client *client)
                dev_err(&client->dev, "Failed to read Manufacturer ID\n");
                return ret;
        }
-       if (ret != 5 || strncmp(buf, "DELTA", 5)) {
+       if (ret != 6 || strncmp(buf, "DELTA", 5)) {
                buf[ret] = '\0';
                dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
                return -ENODEV;
index 25aac40..9198779 100644 (file)
@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
 
        scpi_scale_reading(&value, sensor);
 
+       /*
+        * Temperature sensor values are treated as signed values based on
+        * observation even though that is not explicitly specified, and
+        * because an unsigned u64 temperature does not really make practical
+        * sense especially when the temperature is below zero degrees Celsius.
+        */
+       if (sensor->info.class == TEMPERATURE)
+               return sprintf(buf, "%lld\n", (s64)value);
+
        return sprintf(buf, "%llu\n", value);
 }
 
index c2484f1..8bd6435 100644 (file)
 #define POWER_ENABLE                   0x19
 #define TPS23861_NUM_PORTS             4
 
+#define TPS23861_GENERAL_MASK_1                0x17
+#define TPS23861_CURRENT_SHUNT_MASK    BIT(0)
+
 #define TEMPERATURE_LSB                        652 /* 0.652 degrees Celsius */
 #define VOLTAGE_LSB                    3662 /* 3.662 mV */
 #define SHUNT_RESISTOR_DEFAULT         255000 /* 255 mOhm */
-#define CURRENT_LSB_255                        62260 /* 62.260 uA */
-#define CURRENT_LSB_250                        61039 /* 61.039 uA */
+#define CURRENT_LSB_250                        62260 /* 62.260 uA */
+#define CURRENT_LSB_255                        61039 /* 61.039 uA */
 #define RESISTANCE_LSB                 110966 /* 11.0966 Ohm*/
 #define RESISTANCE_LSB_LOW             157216 /* 15.7216 Ohm*/
 
@@ -117,6 +120,7 @@ struct tps23861_data {
 static struct regmap_config tps23861_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .max_register = 0x6f,
 };
 
 static int tps23861_read_temp(struct tps23861_data *data, long *val)
@@ -560,6 +564,15 @@ static int tps23861_probe(struct i2c_client *client)
        else
                data->shunt_resistor = SHUNT_RESISTOR_DEFAULT;
 
+       if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT)
+               regmap_clear_bits(data->regmap,
+                                 TPS23861_GENERAL_MASK_1,
+                                 TPS23861_CURRENT_SHUNT_MASK);
+       else
+               regmap_set_bits(data->regmap,
+                               TPS23861_GENERAL_MASK_1,
+                               TPS23861_CURRENT_SHUNT_MASK);
+
        hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
                                                         data, &tps23861_chip_info,
                                                         NULL);
index 7d62cbd..354cf7e 100644 (file)
@@ -55,7 +55,7 @@
 #define ALTR_I2C_XFER_TIMEOUT  (msecs_to_jiffies(250))
 
 /**
- * altr_i2c_dev - I2C device context
+ * struct altr_i2c_dev - I2C device context
  * @base: pointer to register struct
  * @msg: pointer to current message
  * @msg_len: number of bytes transferred in msg
@@ -172,7 +172,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
        altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
 }
 
-/**
+/*
  * altr_i2c_transfer - On the last byte to be transmitted, send
  * a Stop bit on the last byte.
  */
@@ -185,7 +185,7 @@ static void altr_i2c_transfer(struct altr_i2c_dev *idev, u32 data)
                writel(data, idev->base + ALTR_I2C_TFR_CMD);
 }
 
-/**
+/*
  * altr_i2c_empty_rx_fifo - Fetch data from RX FIFO until end of
  * transfer. Send a Stop bit on the last byte.
  */
@@ -201,9 +201,8 @@ static void altr_i2c_empty_rx_fifo(struct altr_i2c_dev *idev)
        }
 }
 
-/**
+/*
  * altr_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer.
- * @return: Number of bytes left to transfer.
  */
 static int altr_i2c_fill_tx_fifo(struct altr_i2c_dev *idev)
 {
index 07b710a..6d635a7 100644 (file)
@@ -650,6 +650,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void geni_i2c_shutdown(struct platform_device *pdev)
+{
+       struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
+
+       /* Make client i2c transfers start failing */
+       i2c_mark_adapter_suspended(&gi2c->adap);
+}
+
 static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
 {
        int ret;
@@ -690,6 +698,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
 {
        struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
 
+       i2c_mark_adapter_suspended(&gi2c->adap);
+
        if (!gi2c->suspended) {
                geni_i2c_runtime_suspend(dev);
                pm_runtime_disable(dev);
@@ -699,8 +709,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
        return 0;
 }
 
+static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
+{
+       struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+       i2c_mark_adapter_resumed(&gi2c->adap);
+       return 0;
+}
+
 static const struct dev_pm_ops geni_i2c_pm_ops = {
-       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
        SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
                                                                        NULL)
 };
@@ -714,6 +732,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
 static struct platform_driver geni_i2c_driver = {
        .probe  = geni_i2c_probe,
        .remove = geni_i2c_remove,
+       .shutdown = geni_i2c_shutdown,
        .driver = {
                .name = "geni_i2c",
                .pm = &geni_i2c_pm_ops,
index 3680d60..ec0c7ca 100644 (file)
@@ -65,7 +65,7 @@ static void tegra_bpmp_xlate_flags(u16 flags, u16 *out)
                *out |= SERIALI2C_RECV_LEN;
 }
 
-/**
+/*
  * The serialized I2C format is simply the following:
  * [addr little-endian][flags little-endian][len little-endian][data if write]
  * [addr little-endian][flags little-endian][len little-endian][data if write]
@@ -109,7 +109,7 @@ static void tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
        request->xfer.data_size = pos;
 }
 
-/**
+/*
  * The data in the BPMP -> CPU direction is composed of sequential blocks for
  * those messages that have I2C_M_RD. So, for example, if you have:
  *
index d5e15a8..64e4be1 100644 (file)
@@ -3248,6 +3248,11 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
                goto err_free_attr;
        }
 
+       if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
+               err = -EINVAL;
+               goto err_uobj;
+       }
+
        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
        if (!qp) {
                err = -EINVAL;
index 22898d9..230a6ae 100644 (file)
@@ -581,12 +581,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
        props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
 
-       if (!mlx4_is_slave(dev->dev))
-               err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-
        if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
                resp.response_length += sizeof(resp.hca_core_clock_offset);
-               if (!err && !mlx4_is_slave(dev->dev)) {
+               if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
                        resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
                        resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
                }
@@ -1702,9 +1699,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
        int is_bonded = mlx4_is_bonded(dev);
 
-       if (!rdma_is_port_valid(qp->device, flow_attr->port))
-               return ERR_PTR(-EINVAL);
-
        if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
                return ERR_PTR(-EOPNOTSUPP);
 
index eb92cef..9ce01f7 100644 (file)
@@ -849,15 +849,14 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
        ib_umem_release(cq->buf.umem);
 }
 
-static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
-                            struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
 {
        int i;
        void *cqe;
        struct mlx5_cqe64 *cqe64;
 
        for (i = 0; i < buf->nent; i++) {
-               cqe = get_cqe(cq, i);
+               cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
                cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
                cqe64->op_own = MLX5_CQE_INVALID << 4;
        }
@@ -883,7 +882,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        if (err)
                goto err_db;
 
-       init_cq_frag_buf(cq, &cq->buf);
+       init_cq_frag_buf(&cq->buf);
 
        *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
@@ -1184,7 +1183,7 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        if (err)
                goto ex;
 
-       init_cq_frag_buf(cq, cq->resize_buf);
+       init_cq_frag_buf(cq->resize_buf);
 
        return 0;
 
index 61475b5..7af4df7 100644 (file)
@@ -41,6 +41,7 @@ struct mlx5_ib_user_db_page {
        struct ib_umem         *umem;
        unsigned long           user_virt;
        int                     refcnt;
+       struct mm_struct        *mm;
 };
 
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
@@ -53,7 +54,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
        mutex_lock(&context->db_page_mutex);
 
        list_for_each_entry(page, &context->db_page_list, list)
-               if (page->user_virt == (virt & PAGE_MASK))
+               if ((current->mm == page->mm) &&
+                   (page->user_virt == (virt & PAGE_MASK)))
                        goto found;
 
        page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -71,6 +73,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
                kfree(page);
                goto out;
        }
+       mmgrab(current->mm);
+       page->mm = current->mm;
 
        list_add(&page->list, &context->db_page_list);
 
@@ -91,6 +95,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
 
        if (!--db->u.user_page->refcnt) {
                list_del(&db->u.user_page->list);
+               mmdrop(db->u.user_page->mm);
                ib_umem_release(db->u.user_page->umem);
                kfree(db->u.user_page);
        }
index 2fc6a60..18ee2f2 100644 (file)
@@ -1194,9 +1194,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                goto free_ucmd;
        }
 
-       if (flow_attr->port > dev->num_ports ||
-           (flow_attr->flags &
-            ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
+       if (flow_attr->flags &
+           ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
                err = -EINVAL;
                goto free_ucmd;
        }
@@ -2134,6 +2133,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
        if (err)
                goto end;
 
+       if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+           mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
+               err = -EINVAL;
+               goto end;
+       }
+
        uobj->object = obj;
        obj->mdev = dev->mdev;
        atomic_set(&obj->usecnt, 0);
index 9662cd3..425423d 100644 (file)
@@ -1940,8 +1940,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
                mlx5r_deref_wait_odp_mkey(&mr->mmkey);
 
        if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
-               xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
-                          NULL, GFP_KERNEL);
+               xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+                          mr->sig, NULL, GFP_KERNEL);
 
                if (mr->mtt_mr) {
                        rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
index d5a90a6..5b05cf3 100644 (file)
@@ -163,6 +163,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
 
 static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
        .kind           = "ipoib",
+       .netns_refund   = true,
        .maxtype        = IFLA_IPOIB_MAX,
        .policy         = ipoib_policy,
        .priv_size      = sizeof(struct ipoib_dev_priv),
index 37a23aa..66d623f 100644 (file)
@@ -642,11 +642,45 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
                nmi_exit();
 }
 
+static u32 do_read_iar(struct pt_regs *regs)
+{
+       u32 iar;
+
+       if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
+               u64 pmr;
+
+               /*
+                * We were in a context with IRQs disabled. However, the
+                * entry code has set PMR to a value that allows any
+                * interrupt to be acknowledged, and not just NMIs. This can
+                * lead to surprising effects if the NMI has been retired in
+                * the meantime, and that there is an IRQ pending. The IRQ
+                * would then be taken in NMI context, something that nobody
+                * wants to debug twice.
+                *
+                * Until we sort this, drop PMR again to a level that will
+                * actually only allow NMIs before reading IAR, and then
+                * restore it to what it was.
+                */
+               pmr = gic_read_pmr();
+               gic_pmr_mask_irqs();
+               isb();
+
+               iar = gic_read_iar();
+
+               gic_write_pmr(pmr);
+       } else {
+               iar = gic_read_iar();
+       }
+
+       return iar;
+}
+
 static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 {
        u32 irqnr;
 
-       irqnr = gic_read_iar();
+       irqnr = do_read_iar(regs);
 
        /* Check for special IDs first */
        if ((irqnr >= 1020 && irqnr <= 1023))
index 0a4551e..5fc989a 100644 (file)
@@ -364,7 +364,6 @@ struct cached_dev {
 
        /* The rest of this all shows up in sysfs */
        unsigned int            sequential_cutoff;
-       unsigned int            readahead;
 
        unsigned int            io_disable:1;
        unsigned int            verify:1;
index 29c2317..6d1de88 100644 (file)
@@ -880,9 +880,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                                 struct bio *bio, unsigned int sectors)
 {
        int ret = MAP_CONTINUE;
-       unsigned int reada = 0;
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
        struct bio *miss, *cache_bio;
+       unsigned int size_limit;
 
        s->cache_missed = 1;
 
@@ -892,14 +892,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                goto out_submit;
        }
 
-       if (!(bio->bi_opf & REQ_RAHEAD) &&
-           !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
-           s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
-               reada = min_t(sector_t, dc->readahead >> 9,
-                             get_capacity(bio->bi_bdev->bd_disk) -
-                             bio_end_sector(bio));
-
-       s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
+       /* Limitation for valid replace key size and cache_bio bvecs number */
+       size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
+                          (1 << KEY_SIZE_BITS) - 1);
+       s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
 
        s->iop.replace_key = KEY(s->iop.inode,
                                 bio->bi_iter.bi_sector + s->insert_bio_sectors,
@@ -911,7 +907,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
        s->iop.replace = true;
 
-       miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
+       miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
+                             &s->d->bio_split);
 
        /* btree_search_recurse()'s btree iterator is no good anymore */
        ret = miss == bio ? MAP_DONE : -EINTR;
@@ -933,9 +930,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
                goto out_put;
 
-       if (reada)
-               bch_mark_cache_readahead(s->iop.c, s->d);
-
        s->cache_miss   = miss;
        s->iop.bio      = cache_bio;
        bio_get(cache_bio);
index 503aafe..4c7ee5f 100644 (file)
@@ -46,7 +46,6 @@ read_attribute(cache_misses);
 read_attribute(cache_bypass_hits);
 read_attribute(cache_bypass_misses);
 read_attribute(cache_hit_ratio);
-read_attribute(cache_readaheads);
 read_attribute(cache_miss_collisions);
 read_attribute(bypassed);
 
@@ -64,7 +63,6 @@ SHOW(bch_stats)
                    DIV_SAFE(var(cache_hits) * 100,
                             var(cache_hits) + var(cache_misses)));
 
-       var_print(cache_readaheads);
        var_print(cache_miss_collisions);
        sysfs_hprint(bypassed,  var(sectors_bypassed) << 9);
 #undef var
@@ -86,7 +84,6 @@ static struct attribute *bch_stats_files[] = {
        &sysfs_cache_bypass_hits,
        &sysfs_cache_bypass_misses,
        &sysfs_cache_hit_ratio,
-       &sysfs_cache_readaheads,
        &sysfs_cache_miss_collisions,
        &sysfs_bypassed,
        NULL
@@ -113,7 +110,6 @@ void bch_cache_accounting_clear(struct cache_accounting *acc)
        acc->total.cache_misses = 0;
        acc->total.cache_bypass_hits = 0;
        acc->total.cache_bypass_misses = 0;
-       acc->total.cache_readaheads = 0;
        acc->total.cache_miss_collisions = 0;
        acc->total.sectors_bypassed = 0;
 }
@@ -145,7 +141,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
                scale_stat(&stats->cache_misses);
                scale_stat(&stats->cache_bypass_hits);
                scale_stat(&stats->cache_bypass_misses);
-               scale_stat(&stats->cache_readaheads);
                scale_stat(&stats->cache_miss_collisions);
                scale_stat(&stats->sectors_bypassed);
        }
@@ -168,7 +163,6 @@ static void scale_accounting(struct timer_list *t)
        move_stat(cache_misses);
        move_stat(cache_bypass_hits);
        move_stat(cache_bypass_misses);
-       move_stat(cache_readaheads);
        move_stat(cache_miss_collisions);
        move_stat(sectors_bypassed);
 
@@ -209,14 +203,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
        mark_cache_stats(&c->accounting.collector, hit, bypass);
 }
 
-void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
-{
-       struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-
-       atomic_inc(&dc->accounting.collector.cache_readaheads);
-       atomic_inc(&c->accounting.collector.cache_readaheads);
-}
-
 void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
 {
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
index abfaabf..ca4f435 100644 (file)
@@ -7,7 +7,6 @@ struct cache_stat_collector {
        atomic_t cache_misses;
        atomic_t cache_bypass_hits;
        atomic_t cache_bypass_misses;
-       atomic_t cache_readaheads;
        atomic_t cache_miss_collisions;
        atomic_t sectors_bypassed;
 };
index cc89f31..05ac1d6 100644 (file)
@@ -137,7 +137,6 @@ rw_attribute(io_disable);
 rw_attribute(discard);
 rw_attribute(running);
 rw_attribute(label);
-rw_attribute(readahead);
 rw_attribute(errors);
 rw_attribute(io_error_limit);
 rw_attribute(io_error_halflife);
@@ -260,7 +259,6 @@ SHOW(__bch_cached_dev)
        var_printf(partial_stripes_expensive,   "%u");
 
        var_hprint(sequential_cutoff);
-       var_hprint(readahead);
 
        sysfs_print(running,            atomic_read(&dc->running));
        sysfs_print(state,              states[BDEV_STATE(&dc->sb)]);
@@ -365,7 +363,6 @@ STORE(__cached_dev)
        sysfs_strtoul_clamp(sequential_cutoff,
                            dc->sequential_cutoff,
                            0, UINT_MAX);
-       d_strtoi_h(readahead);
 
        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);
@@ -538,7 +535,6 @@ static struct attribute *bch_cached_dev_files[] = {
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
-       &sysfs_readahead,
 #ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
index a07674e..4c5621b 100644 (file)
@@ -468,6 +468,7 @@ static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
        pcr->ic_version = rtl8411_get_ic_version(pcr);
index 39a6a7e..29f5414 100644 (file)
@@ -255,6 +255,7 @@ void rts5209_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 8200af2..4bcfbc9 100644 (file)
@@ -358,6 +358,7 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
 
@@ -483,6 +484,7 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
 
        rts5227_init_params(pcr);
        pcr->ops = &rts522a_pcr_ops;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
        pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
 
index 781a86d..ffc1282 100644 (file)
@@ -718,6 +718,7 @@ void rts5228_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 89e6f12..c748eaf 100644 (file)
@@ -246,6 +246,7 @@ void rts5229_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
 
index b2676e7..53f3a1f 100644 (file)
@@ -566,6 +566,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
@@ -729,6 +730,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
 void rts524a_init_params(struct rtsx_pcr *pcr)
 {
        rts5249_init_params(pcr);
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
        pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
        pcr->option.ltr_l1off_snooze_sspwrgate =
@@ -845,6 +847,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
 void rts525a_init_params(struct rtsx_pcr *pcr)
 {
        rts5249_init_params(pcr);
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
        pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
        pcr->option.ltr_l1off_snooze_sspwrgate =
index 080a7d6..9b42b20 100644 (file)
@@ -628,6 +628,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 6c64dad..1fd4e0e 100644 (file)
@@ -783,6 +783,7 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = 0x00;
        pcr->sd30_drive_sel_3v3 = 0x00;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 2733111..baf8359 100644 (file)
@@ -85,12 +85,18 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
        if (pcr->aspm_enabled == enable)
                return;
 
-       if (pcr->aspm_en & 0x02)
-               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
-                       FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
-       else
-               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
-                       FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+       if (pcr->aspm_mode == ASPM_MODE_CFG) {
+               pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+                                               PCI_EXP_LNKCTL_ASPMC,
+                                               enable ? pcr->aspm_en : 0);
+       } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+               if (pcr->aspm_en & 0x02)
+                       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+                               FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+               else
+                       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+                               FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+       }
 
        if (!enable && (pcr->aspm_en & 0x02))
                mdelay(10);
@@ -1394,7 +1400,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
                        return err;
        }
 
-       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+       if (pcr->aspm_mode == ASPM_MODE_REG)
+               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
 
        /* No CD interrupt if probing driver with card inserted.
         * So we need to initialize pcr->card_exist here.
@@ -1410,6 +1417,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
 {
        int err;
+       u16 cfg_val;
+       u8 val;
 
        spin_lock_init(&pcr->lock);
        mutex_init(&pcr->pcr_mutex);
@@ -1477,6 +1486,21 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
        if (!pcr->slots)
                return -ENOMEM;
 
+       if (pcr->aspm_mode == ASPM_MODE_CFG) {
+               pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
+               if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
+                       pcr->aspm_enabled = true;
+               else
+                       pcr->aspm_enabled = false;
+
+       } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+               rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+               if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+                       pcr->aspm_enabled = false;
+               else
+                       pcr->aspm_enabled = true;
+       }
+
        if (pcr->ops->fetch_vendor_settings)
                pcr->ops->fetch_vendor_settings(pcr);
 
@@ -1506,7 +1530,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        struct pcr_handle *handle;
        u32 base, len;
        int ret, i, bar = 0;
-       u8 val;
 
        dev_dbg(&(pcidev->dev),
                ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1572,11 +1595,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
        pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
        pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-       rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
-       if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
-               pcr->aspm_enabled = false;
-       else
-               pcr->aspm_enabled = true;
        pcr->card_inserted = 0;
        pcr->card_removed = 0;
        INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
index 635bf31..baab4c2 100644 (file)
@@ -692,14 +692,19 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
 
        /* Issue CMD19 twice for each tap */
        for (i = 0; i < 2 * priv->tap_num; i++) {
+               int cmd_error;
+
                /* Set sampling clock position */
                sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
 
-               if (mmc_send_tuning(mmc, opcode, NULL) == 0)
+               if (mmc_send_tuning(mmc, opcode, &cmd_error) == 0)
                        set_bit(i, priv->taps);
 
                if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0)
                        set_bit(i, priv->smpcmp);
+
+               if (cmd_error)
+                       mmc_abort_tuning(mmc, opcode);
        }
 
        ret = renesas_sdhi_select_tuning(host);
@@ -939,7 +944,7 @@ static const struct soc_device_attribute sdhi_quirks_match[]  = {
        { .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
        { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
-       { .soc_id = "r8a7796", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps1357 },
+       { .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 },
        { .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
        { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
        { .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
index d174823..4ffbfd5 100644 (file)
@@ -350,6 +350,7 @@ static int ldisc_open(struct tty_struct *tty)
        rtnl_lock();
        result = register_netdevice(dev);
        if (result) {
+               tty_kref_put(tty);
                rtnl_unlock();
                free_netdev(dev);
                return -ENODEV;
index 029e77d..a45865b 100644 (file)
@@ -82,6 +82,8 @@ struct mcba_priv {
        bool can_ka_first_pass;
        bool can_speed_check;
        atomic_t free_ctx_cnt;
+       void *rxbuf[MCBA_MAX_RX_URBS];
+       dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
 };
 
 /* CAN frame */
@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
        for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
                struct urb *urb = NULL;
                u8 *buf;
+               dma_addr_t buf_dma;
 
                /* create a URB, and a buffer for it */
                urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
                }
 
                buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
-                                        GFP_KERNEL, &urb->transfer_dma);
+                                        GFP_KERNEL, &buf_dma);
                if (!buf) {
                        netdev_err(netdev, "No memory left for USB buffer\n");
                        usb_free_urb(urb);
@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
                if (err) {
                        usb_unanchor_urb(urb);
                        usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
-                                         buf, urb->transfer_dma);
+                                         buf, buf_dma);
                        usb_free_urb(urb);
                        break;
                }
 
+               priv->rxbuf[i] = buf;
+               priv->rxbuf_dma[i] = buf_dma;
+
                /* Drop reference, USB core will take care of freeing it */
                usb_free_urb(urb);
        }
@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)
 
 static void mcba_urb_unlink(struct mcba_priv *priv)
 {
+       int i;
+
        usb_kill_anchored_urbs(&priv->rx_submitted);
+
+       for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
+               usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+                                 priv->rxbuf[i], priv->rxbuf_dma[i]);
+
        usb_kill_anchored_urbs(&priv->tx_submitted);
 }
 
index 881f887..5257148 100644 (file)
@@ -236,36 +236,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
 static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
                                struct ena_tx_buffer *tx_info,
                                struct xdp_frame *xdpf,
-                               void **push_hdr,
-                               u32 *push_len)
+                               struct ena_com_tx_ctx *ena_tx_ctx)
 {
        struct ena_adapter *adapter = xdp_ring->adapter;
        struct ena_com_buf *ena_buf;
-       dma_addr_t dma = 0;
+       int push_len = 0;
+       dma_addr_t dma;
+       void *data;
        u32 size;
 
        tx_info->xdpf = xdpf;
+       data = tx_info->xdpf->data;
        size = tx_info->xdpf->len;
-       ena_buf = tx_info->bufs;
 
-       /* llq push buffer */
-       *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
-       *push_hdr = tx_info->xdpf->data;
+       if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+               /* Designate part of the packet for LLQ */
+               push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+
+               ena_tx_ctx->push_header = data;
+
+               size -= push_len;
+               data += push_len;
+       }
+
+       ena_tx_ctx->header_len = push_len;
 
-       if (size - *push_len > 0) {
+       if (size > 0) {
                dma = dma_map_single(xdp_ring->dev,
-                                    *push_hdr + *push_len,
-                                    size - *push_len,
+                                    data,
+                                    size,
                                     DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
                        goto error_report_dma_error;
 
-               tx_info->map_linear_data = 1;
-               tx_info->num_of_bufs = 1;
-       }
+               tx_info->map_linear_data = 0;
 
-       ena_buf->paddr = dma;
-       ena_buf->len = size;
+               ena_buf = tx_info->bufs;
+               ena_buf->paddr = dma;
+               ena_buf->len = size;
+
+               ena_tx_ctx->ena_bufs = ena_buf;
+               ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+       }
 
        return 0;
 
@@ -274,10 +286,6 @@ error_report_dma_error:
                          &xdp_ring->syncp);
        netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
 
-       xdp_return_frame_rx_napi(tx_info->xdpf);
-       tx_info->xdpf = NULL;
-       tx_info->num_of_bufs = 0;
-
        return -EINVAL;
 }
 
@@ -289,8 +297,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
        struct ena_com_tx_ctx ena_tx_ctx = {};
        struct ena_tx_buffer *tx_info;
        u16 next_to_use, req_id;
-       void *push_hdr;
-       u32 push_len;
        int rc;
 
        next_to_use = xdp_ring->next_to_use;
@@ -298,15 +304,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
        tx_info = &xdp_ring->tx_buffer_info[req_id];
        tx_info->num_of_bufs = 0;
 
-       rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
+       rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
        if (unlikely(rc))
                return rc;
 
-       ena_tx_ctx.ena_bufs = tx_info->bufs;
-       ena_tx_ctx.push_header = push_hdr;
-       ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
        ena_tx_ctx.req_id = req_id;
-       ena_tx_ctx.header_len = push_len;
 
        rc = ena_xmit_common(dev,
                             xdp_ring,
index b3d7433..7748b27 100644 (file)
@@ -1849,6 +1849,7 @@ out_free_netdev:
        free_netdev(netdev);
 out_pci_release:
        pci_release_mem_regions(pdev);
+       pci_disable_pcie_error_reporting(pdev);
 out_pci_disable:
        pci_disable_device(pdev);
        return err;
index fcc729d..aef3fcc 100644 (file)
@@ -7308,7 +7308,7 @@ skip_rdma:
        entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
                     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
        entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
-       entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
+       entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
        entries = roundup(entries, ctx->tqm_entries_multiple);
        entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
        for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
@@ -11750,6 +11750,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
        bnxt_hwrm_coal_params_qcaps(bp);
 }
 
+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
+
 static int bnxt_fw_init_one(struct bnxt *bp)
 {
        int rc;
@@ -11764,6 +11766,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
                netdev_err(bp->dev, "Firmware init phase 2 failed\n");
                return rc;
        }
+       rc = bnxt_probe_phy(bp, false);
+       if (rc)
+               return rc;
        rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
        if (rc)
                return rc;
@@ -13155,6 +13160,7 @@ init_err_pci_clean:
        bnxt_hwrm_func_drv_unrgtr(bp);
        bnxt_free_hwrm_short_cmd_req(bp);
        bnxt_free_hwrm_resources(bp);
+       bnxt_ethtool_free(bp);
        kfree(bp->fw_health);
        bp->fw_health = NULL;
        bnxt_cleanup_pci(bp);
index 314f8d8..9058f09 100644 (file)
@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
                          bool persistent, u8 *smt_idx);
 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
-int cxgb_open(struct net_device *dev);
-int cxgb_close(struct net_device *dev);
 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
 void cxgb4_quiesce_rx(struct sge_rspq *q);
 int cxgb4_port_mirror_alloc(struct net_device *dev);
index 61ea3ec..83ed10a 100644 (file)
@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
                return ret;
        }
 
-       spin_lock_bh(&adap->win0_lock);
+       /* We have to RESET the chip/firmware because we need the
+        * chip in uninitialized state for loading new PHY image.
+        * Otherwise, the running firmware will only store the PHY
+        * image in local RAM which will be lost after next reset.
+        */
+       ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
+       if (ret < 0) {
+               dev_err(adap->pdev_dev,
+                       "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
+                       ret);
+               return ret;
+       }
+
        ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
-       spin_unlock_bh(&adap->win0_lock);
-       if (ret)
-               dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
+       if (ret < 0) {
+               dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
+                       ret);
+               return ret;
+       }
 
-       return ret;
+       return 0;
 }
 
 static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
                                                   u32 ftid)
 {
        struct tid_info *t = &adap->tids;
-       struct filter_entry *f;
 
-       if (ftid < t->nhpftids)
-               f = &adap->tids.hpftid_tab[ftid];
-       else if (ftid < t->nftids)
-               f = &adap->tids.ftid_tab[ftid - t->nhpftids];
-       else
-               f = lookup_tid(&adap->tids, ftid);
+       if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
+               return &t->hpftid_tab[ftid - t->hpftid_base];
 
-       return f;
+       if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
+               return &t->ftid_tab[ftid - t->ftid_base];
+
+       return lookup_tid(t, ftid);
 }
 
 static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
        filter_id = filter_info->loc_array[cmd->fs.location];
        f = cxgb4_get_filter_entry(adapter, filter_id);
 
+       if (f->fs.prio)
+               filter_id -= adapter->tids.hpftid_base;
+       else if (!f->fs.hash)
+               filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
        ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
        if (ret)
                goto err;
@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
 
        filter_info = &adapter->ethtool_filters->port[pi->port_id];
 
+       if (fs.prio)
+               tid += adapter->tids.hpftid_base;
+       else if (!fs.hash)
+               tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
        filter_info->loc_array[cmd->fs.location] = tid;
        set_bit(cmd->fs.location, filter_info->bmap);
        filter_info->in_use++;
index 22c9ac9..6260b3b 100644 (file)
@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
                                      WORD_MASK, f->fs.nat_lip[3] |
                                      f->fs.nat_lip[2] << 8 |
                                      f->fs.nat_lip[1] << 16 |
-                                     (u64)f->fs.nat_lip[0] << 25, 1);
+                                     (u64)f->fs.nat_lip[0] << 24, 1);
                }
        }
 
index 421bd9b..762113a 100644 (file)
@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
 /*
  * net_device operations
  */
-int cxgb_open(struct net_device *dev)
+static int cxgb_open(struct net_device *dev)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
@@ -2882,7 +2882,7 @@ out_unlock:
        return err;
 }
 
-int cxgb_close(struct net_device *dev)
+static int cxgb_close(struct net_device *dev)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
@@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)
 
        /* Load PHY Firmware onto adapter.
         */
-       spin_lock_bh(&adap->win0_lock);
        ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
                             (u8 *)phyf->data, phyf->size);
-       spin_unlock_bh(&adap->win0_lock);
        if (ret < 0)
                dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
                        -ret);
index 1b88bd1..dd9be22 100644 (file)
@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
        if (!ch_flower)
                return -ENOENT;
 
+       rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+                              adap->flower_ht_params);
+
        ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
                                      &ch_flower->fs, ch_flower->filter_id);
        if (ret)
-               goto err;
+               netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
+                          ch_flower->filter_id, ret);
 
-       ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
-                                    adap->flower_ht_params);
-       if (ret) {
-               netdev_err(dev, "Flow remove from rhashtable failed");
-               goto err;
-       }
        kfree_rcu(ch_flower, rcu);
-
-err:
        return ret;
 }
 
index 6c259de..338b04f 100644 (file)
@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
         * down before configuring tc params.
         */
        if (netif_running(dev)) {
-               cxgb_close(dev);
+               netif_tx_stop_all_queues(dev);
+               netif_carrier_off(dev);
                needs_bring_up = true;
        }
 
@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
        }
 
 out:
-       if (needs_bring_up)
-               cxgb_open(dev);
+       if (needs_bring_up) {
+               netif_tx_start_all_queues(dev);
+               netif_carrier_on(dev);
+       }
 
        mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
        return ret;
index 1e5f2ed..6a099cb 100644 (file)
@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
        if (!eosw_txq)
                return -ENOMEM;
 
+       if (!(adap->flags & CXGB4_FW_OK)) {
+               /* Don't stall caller when access to FW is lost */
+               complete(&eosw_txq->completion);
+               return -EIO;
+       }
+
        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
index 9428ef1..a0555f4 100644 (file)
@@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
  *     @addr: the start address to write
  *     @n: length of data to write in bytes
  *     @data: the data to write
+ *     @byte_oriented: whether to store data as bytes or as words
  *
  *     Writes up to a page of data (256 bytes) to the serial flash starting
  *     at the given address.  All the data must be written to the same page.
+ *     If @byte_oriented is set the write data is stored as byte stream
+ *     (i.e. matches what on disk), otherwise in big-endian.
  */
 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
-                         unsigned int n, const u8 *data)
+                         unsigned int n, const u8 *data, bool byte_oriented)
 {
-       int ret;
-       u32 buf[64];
        unsigned int i, c, left, val, offset = addr & 0xff;
+       u32 buf[64];
+       int ret;
 
        if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
                return -EINVAL;
@@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
            (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
                goto unlock;
 
-       for (left = n; left; left -= c) {
+       for (left = n; left; left -= c, data += c) {
                c = min(left, 4U);
-               for (val = 0, i = 0; i < c; ++i)
-                       val = (val << 8) + *data++;
+               for (val = 0, i = 0; i < c; ++i) {
+                       if (byte_oriented)
+                               val = (val << 8) + data[i];
+                       else
+                               val = (val << 8) + data[c - i - 1];
+               }
 
                ret = sf1_write(adapter, c, c != left, 1, val);
                if (ret)
@@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
        t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
 
        /* Read the page to verify the write succeeded */
-       ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+       ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
+                           byte_oriented);
        if (ret)
                return ret;
 
@@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
         */
        memcpy(first_page, fw_data, SF_PAGE_SIZE);
        ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
-       ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
+       ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
        if (ret)
                goto out;
 
@@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                fw_data += SF_PAGE_SIZE;
-               ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+               ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
                if (ret)
                        goto out;
        }
 
-       ret = t4_write_flash(adap,
-                            fw_start + offsetof(struct fw_hdr, fw_ver),
-                            sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+       ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
+                            sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
+                            true);
 out:
        if (ret)
                dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
@@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
        /* Copy the supplied PHY Firmware image to the adapter memory location
         * allocated by the adapter firmware.
         */
+       spin_lock_bh(&adap->win0_lock);
        ret = t4_memory_rw(adap, win, mtype, maddr,
                           phy_fw_size, (__be32 *)phy_fw_data,
                           T4_MEMORY_WRITE);
+       spin_unlock_bh(&adap->win0_lock);
        if (ret)
                return ret;
 
@@ -10208,7 +10218,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
                        n = size - i;
                else
                        n = SF_PAGE_SIZE;
-               ret = t4_write_flash(adap, addr, n, cfg_data);
+               ret = t4_write_flash(adap, addr, n, cfg_data, true);
                if (ret)
                        goto out;
 
@@ -10677,13 +10687,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                boot_data += SF_PAGE_SIZE;
-               ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
+               ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
+                                    false);
                if (ret)
                        goto out;
        }
 
        ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
-                            (const u8 *)header);
+                            (const u8 *)header, false);
 
 out:
        if (ret)
@@ -10758,7 +10769,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
        for (i = 0; i < size; i += SF_PAGE_SIZE) {
                n = min_t(u32, size - i, SF_PAGE_SIZE);
 
-               ret = t4_write_flash(adap, addr, n, cfg_data);
+               ret = t4_write_flash(adap, addr, n, cfg_data, false);
                if (ret)
                        goto out;
 
@@ -10770,7 +10781,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
        for (i = 0; i < npad; i++) {
                u8 data = 0;
 
-               ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
+               ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
+                                    false);
                if (ret)
                        goto out;
        }
index 46b0dba..7c99217 100644 (file)
@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
        struct ec_bhf_priv *priv = netdev_priv(net_dev);
 
        unregister_netdev(net_dev);
-       free_netdev(net_dev);
 
        pci_iounmap(dev, priv->dma_io);
        pci_iounmap(dev, priv->io);
+
+       free_netdev(net_dev);
+
        pci_release_regions(dev);
        pci_clear_master(dev);
        pci_disable_device(dev);
index b6eba29..7968568 100644 (file)
@@ -5897,6 +5897,7 @@ drv_cleanup:
 unmap_bars:
        be_unmap_pci_bars(adapter);
 free_netdev:
+       pci_disable_pcie_error_reporting(pdev);
        free_netdev(netdev);
 rel_reg:
        pci_release_regions(pdev);
index 1753807..d71eac7 100644 (file)
@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
 {
        struct fec_enet_private *fep =
                container_of(cc, struct fec_enet_private, cc);
-       const struct platform_device_id *id_entry =
-               platform_get_device_id(fep->pdev);
        u32 tempval;
 
        tempval = readl(fep->hwp + FEC_ATIME_CTRL);
        tempval |= FEC_T_CTRL_CAPTURE;
        writel(tempval, fep->hwp + FEC_ATIME_CTRL);
 
-       if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
+       if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
                udelay(1);
 
        return readl(fep->hwp + FEC_ATIME);
@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
        fep->ptp_caps.enable = fec_ptp_enable;
 
        fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+       if (!fep->cycle_speed) {
+               fep->cycle_speed = NSEC_PER_SEC;
+               dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
+       }
        fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
 
        spin_lock_init(&fep->tmreg_lock);
index de70c16..b883ab8 100644 (file)
@@ -2313,15 +2313,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
                result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+               if (result == I40E_XDP_CONSUMED)
+                       goto out_failure;
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               result = I40E_XDP_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index 46d8844..68f177a 100644 (file)
@@ -162,9 +162,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
                rcu_read_unlock();
-               return result;
+               return I40E_XDP_REDIR;
        }
 
        switch (act) {
@@ -173,11 +174,14 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
                result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+               if (result == I40E_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index e35db3f..2924c67 100644 (file)
@@ -335,6 +335,7 @@ struct ice_vsi {
        struct ice_tc_cfg tc_cfg;
        struct bpf_prog *xdp_prog;
        struct ice_ring **xdp_rings;     /* XDP ring array */
+       unsigned long *af_xdp_zc_qps;    /* tracks AF_XDP ZC enabled qps */
        u16 num_xdp_txq;                 /* Used XDP queues */
        u8 xdp_mapping_mode;             /* ICE_MAP_MODE_[CONTIG|SCATTER] */
 
@@ -547,15 +548,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
  */
 static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
 {
+       struct ice_vsi *vsi = ring->vsi;
        u16 qid = ring->q_index;
 
        if (ice_ring_is_xdp(ring))
-               qid -= ring->vsi->num_xdp_txq;
+               qid -= vsi->num_xdp_txq;
 
-       if (!ice_is_xdp_ena_vsi(ring->vsi))
+       if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
                return NULL;
 
-       return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+       return xsk_get_pool_from_qid(vsi->netdev, qid);
 }
 
 /**
index d9ddd0b..99301ad 100644 (file)
@@ -1773,49 +1773,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
                ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
                                                100000baseKR4_Full);
        }
-
-       /* Autoneg PHY types */
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
-               ethtool_link_ksettings_add_link_mode(ks, supported,
-                                                    Autoneg);
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    Autoneg);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
-               ethtool_link_ksettings_add_link_mode(ks, supported,
-                                                    Autoneg);
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    Autoneg);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
-               ethtool_link_ksettings_add_link_mode(ks, supported,
-                                                    Autoneg);
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    Autoneg);
-       }
 }
 
 #define TEST_SET_BITS_TIMEOUT  50
@@ -1972,9 +1929,7 @@ ice_get_link_ksettings(struct net_device *netdev,
                ks->base.port = PORT_TP;
                break;
        case ICE_MEDIA_BACKPLANE:
-               ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
                ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
-               ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
                ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                     Backplane);
                ks->base.port = PORT_NONE;
@@ -2049,6 +2004,12 @@ ice_get_link_ksettings(struct net_device *netdev,
        if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
                ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
 
+       /* Set supported and advertised autoneg */
+       if (ice_is_phy_caps_an_enabled(caps)) {
+               ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+               ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+       }
+
 done:
        kfree(caps);
        return err;
index de38a0f..9b8300d 100644 (file)
@@ -31,6 +31,7 @@
 #define PF_FW_ATQLEN_ATQOVFL_M                 BIT(29)
 #define PF_FW_ATQLEN_ATQCRIT_M                 BIT(30)
 #define VF_MBX_ARQLEN(_VF)                     (0x0022BC00 + ((_VF) * 4))
+#define VF_MBX_ATQLEN(_VF)                     (0x0022A800 + ((_VF) * 4))
 #define PF_FW_ATQLEN_ATQENABLE_M               BIT(31)
 #define PF_FW_ATQT                             0x00080400
 #define PF_MBX_ARQBAH                          0x0022E400
index 82e2ce2..27f9dac 100644 (file)
@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
        if (!vsi->q_vectors)
                goto err_vectors;
 
+       vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+       if (!vsi->af_xdp_zc_qps)
+               goto err_zc_qps;
+
        return 0;
 
+err_zc_qps:
+       devm_kfree(dev, vsi->q_vectors);
 err_vectors:
        devm_kfree(dev, vsi->rxq_map);
 err_rxq_map:
@@ -194,6 +200,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
                break;
        case ICE_VSI_VF:
                vf = &pf->vf[vsi->vf_id];
+               if (vf->num_req_qs)
+                       vf->num_vf_qs = vf->num_req_qs;
                vsi->alloc_txq = vf->num_vf_qs;
                vsi->alloc_rxq = vf->num_vf_qs;
                /* pf->num_msix_per_vf includes (VF miscellaneous vector +
@@ -288,6 +296,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 
        dev = ice_pf_to_dev(pf);
 
+       if (vsi->af_xdp_zc_qps) {
+               bitmap_free(vsi->af_xdp_zc_qps);
+               vsi->af_xdp_zc_qps = NULL;
+       }
        /* free the ring and vector containers */
        if (vsi->q_vectors) {
                devm_kfree(dev, vsi->q_vectors);
@@ -1705,12 +1717,13 @@ setup_rings:
  * ice_vsi_cfg_txqs - Configure the VSI for Tx
  * @vsi: the VSI being configured
  * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
  *
  * Return 0 on success and a negative value on error
  * Configure the Tx VSI for operation.
  */
 static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
 {
        struct ice_aqc_add_tx_qgrp *qg_buf;
        u16 q_idx = 0;
@@ -1722,7 +1735,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
 
        qg_buf->num_txqs = 1;
 
-       for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+       for (q_idx = 0; q_idx < count; q_idx++) {
                err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
                if (err)
                        goto err_cfg_txqs;
@@ -1742,7 +1755,7 @@ err_cfg_txqs:
  */
 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
 {
-       return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
+       return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
 }
 
 /**
@@ -1757,7 +1770,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
        int ret;
        int i;
 
-       ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+       ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
        if (ret)
                return ret;
 
@@ -1997,17 +2010,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
  * @rst_src: reset source
  * @rel_vmvf_num: Relative ID of VF/VM
  * @rings: Tx ring array to be stopped
+ * @count: number of Tx ring array elements
  */
 static int
 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-                     u16 rel_vmvf_num, struct ice_ring **rings)
+                     u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
 {
        u16 q_idx;
 
        if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
                return -EINVAL;
 
-       for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+       for (q_idx = 0; q_idx < count; q_idx++) {
                struct ice_txq_meta txq_meta = { };
                int status;
 
@@ -2035,7 +2049,7 @@ int
 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
                          u16 rel_vmvf_num)
 {
-       return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
+       return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
 }
 
 /**
@@ -2044,7 +2058,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
  */
 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
 {
-       return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+       return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
 }
 
 /**
index 4ee85a2..0eb2307 100644 (file)
@@ -2555,6 +2555,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
        return (ret || xdp_ring_err) ? -ENOMEM : 0;
 }
 
+/**
+ * ice_xdp_safe_mode - XDP handler for safe mode
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
+                            struct netdev_bpf *xdp)
+{
+       NL_SET_ERR_MSG_MOD(xdp->extack,
+                          "Please provide working DDP firmware package in order to use XDP\n"
+                          "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
+       return -EOPNOTSUPP;
+}
+
 /**
  * ice_xdp - implements XDP handler
  * @dev: netdevice
@@ -6937,6 +6951,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
        .ndo_change_mtu = ice_change_mtu,
        .ndo_get_stats64 = ice_get_stats64,
        .ndo_tx_timeout = ice_tx_timeout,
+       .ndo_bpf = ice_xdp_safe_mode,
 };
 
 static const struct net_device_ops ice_netdev_ops = {
index e2b4b29..04748aa 100644 (file)
@@ -523,7 +523,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
            struct bpf_prog *xdp_prog)
 {
        struct ice_ring *xdp_ring;
-       int err;
+       int err, result;
        u32 act;
 
        act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -532,14 +532,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
                return ICE_XDP_PASS;
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
-               return ice_xmit_xdp_buff(xdp, xdp_ring);
+               result = ice_xmit_xdp_buff(xdp, xdp_ring);
+               if (result == ICE_XDP_CONSUMED)
+                       goto out_failure;
+               return result;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               return ICE_XDP_REDIR;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
@@ -2143,6 +2149,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
        struct ice_tx_offload_params offload = { 0 };
        struct ice_vsi *vsi = tx_ring->vsi;
        struct ice_tx_buf *first;
+       struct ethhdr *eth;
        unsigned int count;
        int tso, csum;
 
@@ -2189,7 +2196,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
                goto out_drop;
 
        /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
-       if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+       eth = (struct ethhdr *)skb_mac_header(skb);
+       if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+                     eth->h_proto == htons(ETH_P_LLDP)) &&
                     vsi->type == ICE_VSI_PF &&
                     vsi->port_info->qos_cfg.is_sw_lldp))
                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
index a1d22d2..97a46c6 100644 (file)
@@ -713,13 +713,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
         */
        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 
-       /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
-        * in the case of VFR. If this is done for PFR, it can mess up VF
-        * resets because the VF driver may already have started cleanup
-        * by the time we get here.
+       /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+        * needs to clear them in the case of VFR/VFLR. If this is done for
+        * PFR, it can mess up VF resets because the VF driver may already
+        * have started cleanup by the time we get here.
         */
-       if (!is_pfr)
+       if (!is_pfr) {
                wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+               wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+       }
 
        /* In the case of a VFLR, the HW has already reset the VF and we
         * just need to clean up, so don't hit the VFRTRIG register.
@@ -1698,7 +1700,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
                ice_vf_ctrl_vsi_release(vf);
 
        ice_vf_pre_vsi_rebuild(vf);
-       ice_vf_rebuild_vsi_with_release(vf);
+
+       if (ice_vf_rebuild_vsi_with_release(vf)) {
+               dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
+               return false;
+       }
+
        ice_vf_post_vsi_rebuild(vf);
 
        /* if the VF has been reset allow it to come up again */
index faa7b8d..a1f89ea 100644 (file)
@@ -270,6 +270,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
        if (!pool)
                return -EINVAL;
 
+       clear_bit(qid, vsi->af_xdp_zc_qps);
        xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
 
        return 0;
@@ -300,6 +301,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
        if (err)
                return err;
 
+       set_bit(qid, vsi->af_xdp_zc_qps);
+
        return 0;
 }
 
@@ -473,9 +476,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
 
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
                rcu_read_unlock();
-               return result;
+               return ICE_XDP_REDIR;
        }
 
        switch (act) {
@@ -484,11 +488,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
                result = ice_xmit_xdp_buff(xdp, xdp_ring);
+               if (result == ICE_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
index 7bda8c5..2d3daf0 100644 (file)
@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
 void igb_ptp_tx_hang(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
 int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-                       struct sk_buff *skb);
+                       ktime_t *timestamp);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
index 038a9fd..b2a042f 100644 (file)
@@ -8280,7 +8280,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
                                         struct igb_rx_buffer *rx_buffer,
                                         struct xdp_buff *xdp,
-                                        union e1000_adv_rx_desc *rx_desc)
+                                        ktime_t timestamp)
 {
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8300,12 +8300,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
        if (unlikely(!skb))
                return NULL;
 
-       if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
-               if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
-                       xdp->data += IGB_TS_HDR_LEN;
-                       size -= IGB_TS_HDR_LEN;
-               }
-       }
+       if (timestamp)
+               skb_hwtstamps(skb)->hwtstamp = timestamp;
 
        /* Determine available headroom for copy */
        headlen = size;
@@ -8336,7 +8332,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
                                     struct igb_rx_buffer *rx_buffer,
                                     struct xdp_buff *xdp,
-                                    union e1000_adv_rx_desc *rx_desc)
+                                    ktime_t timestamp)
 {
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8363,11 +8359,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
        if (metasize)
                skb_metadata_set(skb, metasize);
 
-       /* pull timestamp out of packet data */
-       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
-                       __skb_pull(skb, IGB_TS_HDR_LEN);
-       }
+       if (timestamp)
+               skb_hwtstamps(skb)->hwtstamp = timestamp;
 
        /* update buffer offset */
 #if (PAGE_SIZE < 8192)
@@ -8401,18 +8394,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
                break;
        case XDP_TX:
                result = igb_xdp_xmit_back(adapter, xdp);
+               if (result == IGB_XDP_CONSUMED)
+                       goto out_failure;
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
-               if (!err)
-                       result = IGB_XDP_REDIR;
-               else
-                       result = IGB_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               result = IGB_XDP_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
@@ -8682,7 +8677,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        while (likely(total_packets < budget)) {
                union e1000_adv_rx_desc *rx_desc;
                struct igb_rx_buffer *rx_buffer;
+               ktime_t timestamp = 0;
+               int pkt_offset = 0;
                unsigned int size;
+               void *pktbuf;
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -8702,14 +8700,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                dma_rmb();
 
                rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+               pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+               /* pull rx packet timestamp if available and valid */
+               if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+                       int ts_hdr_len;
+
+                       ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+                                                        pktbuf, &timestamp);
+
+                       pkt_offset += ts_hdr_len;
+                       size -= ts_hdr_len;
+               }
 
                /* retrieve a buffer from the ring */
                if (!skb) {
-                       unsigned int offset = igb_rx_offset(rx_ring);
-                       unsigned char *hard_start;
+                       unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+                       unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
 
-                       hard_start = page_address(rx_buffer->page) +
-                                    rx_buffer->page_offset - offset;
                        xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
@@ -8732,10 +8740,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                } else if (skb)
                        igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
                else if (ring_uses_build_skb(rx_ring))
-                       skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
+                       skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
+                                           timestamp);
                else
                        skb = igb_construct_skb(rx_ring, rx_buffer,
-                                               &xdp, rx_desc);
+                                               &xdp, timestamp);
 
                /* exit if we failed to retrieve a buffer */
                if (!skb) {
index ba61fe9..d68cd44 100644 (file)
@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
        dev_kfree_skb_any(skb);
 }
 
-#define IGB_RET_PTP_DISABLED 1
-#define IGB_RET_PTP_INVALID 2
-
 /**
  * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
  * @q_vector: Pointer to interrupt specific structure
  * @va: Pointer to address containing Rx buffer
- * @skb: Buffer containing timestamp and packet
+ * @timestamp: Pointer where timestamp will be stored
  *
  * This function is meant to retrieve a timestamp from the first buffer of an
  * incoming frame.  The value is stored in little endian format starting on
  * byte 8
  *
- * Returns: 0 if success, nonzero if failure
+ * Returns: The timestamp header length or 0 if not available
  **/
 int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-                       struct sk_buff *skb)
+                       ktime_t *timestamp)
 {
        struct igb_adapter *adapter = q_vector->adapter;
+       struct skb_shared_hwtstamps ts;
        __le64 *regval = (__le64 *)va;
        int adjust = 0;
 
        if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
-               return IGB_RET_PTP_DISABLED;
+               return 0;
 
        /* The timestamp is recorded in little endian format.
         * DWORD: 0        1        2        3
@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
 
        /* check reserved dwords are zero, be/le doesn't matter for zero */
        if (regval[0])
-               return IGB_RET_PTP_INVALID;
+               return 0;
 
-       igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
-                                  le64_to_cpu(regval[1]));
+       igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
 
        /* adjust timestamp for the RX latency based on link speed */
        if (adapter->hw.mac.type == e1000_i210) {
@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
                        break;
                }
        }
-       skb_hwtstamps(skb)->hwtstamp =
-               ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 
-       return 0;
+       *timestamp = ktime_sub_ns(ts.hwtstamp, adjust);
+
+       return IGB_TS_HDR_LEN;
 }
 
 /**
index 069471b..f1adf15 100644 (file)
@@ -2047,20 +2047,19 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
                break;
        case XDP_TX:
                if (igc_xdp_xmit_back(adapter, xdp) < 0)
-                       res = IGC_XDP_CONSUMED;
-               else
-                       res = IGC_XDP_TX;
+                       goto out_failure;
+               res = IGC_XDP_TX;
                break;
        case XDP_REDIRECT:
                if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
-                       res = IGC_XDP_CONSUMED;
-               else
-                       res = IGC_XDP_REDIRECT;
+                       goto out_failure;
+               res = IGC_XDP_REDIRECT;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(adapter->netdev, prog, act);
                fallthrough;
        case XDP_DROP:
index c5ec17d..2ac5b82 100644 (file)
@@ -2213,23 +2213,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
                break;
        case XDP_TX:
                xdpf = xdp_convert_buff_to_frame(xdp);
-               if (unlikely(!xdpf)) {
-                       result = IXGBE_XDP_CONSUMED;
-                       break;
-               }
+               if (unlikely(!xdpf))
+                       goto out_failure;
                result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+               if (result == IXGBE_XDP_CONSUMED)
+                       goto out_failure;
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
-               if (!err)
-                       result = IXGBE_XDP_REDIR;
-               else
-                       result = IXGBE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               result = IXGBE_XDP_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index 91ad5b9..f72d297 100644 (file)
@@ -106,9 +106,10 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
 
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
                rcu_read_unlock();
-               return result;
+               return IXGBE_XDP_REDIR;
        }
 
        switch (act) {
@@ -116,16 +117,17 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
                break;
        case XDP_TX:
                xdpf = xdp_convert_buff_to_frame(xdp);
-               if (unlikely(!xdpf)) {
-                       result = IXGBE_XDP_CONSUMED;
-                       break;
-               }
+               if (unlikely(!xdpf))
+                       goto out_failure;
                result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+               if (result == IXGBE_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index ba2ed8a..0e733cc 100644 (file)
@@ -1067,11 +1067,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
        case XDP_TX:
                xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
                result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+               if (result == IXGBEVF_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index 36dc3e5..21ef2f1 100644 (file)
@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
 
 static int xrx200_alloc_skb(struct xrx200_chan *ch)
 {
+       struct sk_buff *skb = ch->skb[ch->dma.desc];
        dma_addr_t mapping;
        int ret = 0;
 
@@ -168,6 +169,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
                                 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
                dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+               ch->skb[ch->dma.desc] = skb;
                ret = -ENOMEM;
                goto skip;
        }
@@ -198,7 +200,6 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
        ch->dma.desc %= LTQ_DESC_NUM;
 
        if (ret) {
-               ch->skb[ch->dma.desc] = skb;
                net_dev->stats.rx_dropped++;
                netdev_err(net_dev, "failed to allocate new rx buffer\n");
                return ret;
@@ -352,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
        struct xrx200_chan *ch = ptr;
 
        if (napi_schedule_prep(&ch->napi)) {
-               __napi_schedule(&ch->napi);
                ltq_dma_disable_irq(&ch->dma);
+               __napi_schedule(&ch->napi);
        }
 
        ltq_dma_ack_irq(&ch->dma);
index f6cfec8..dc4ac1a 100644 (file)
@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET         0xb0
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET   0xa8
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET  0xac
+#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
        if (mlx4_is_mfunc(dev))
                disable_unsupported_roce_caps(outbox);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
+       dev_cap->map_clock_to_user = field & 0x80;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
        dev_cap->reserved_qps = 1 << (field & 0xf);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
index 8f020f2..cf64e54 100644 (file)
@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
        u32 health_buffer_addrs;
        struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
        bool wol_port[MLX4_MAX_PORTS + 1];
+       bool map_clock_to_user;
 };
 
 struct mlx4_func_cap {
index c326b43..00c8465 100644 (file)
@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                }
        }
 
+       dev->caps.map_clock_to_user  = dev_cap->map_clock_to_user;
        dev->caps.uar_page_size      = PAGE_SIZE;
        dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
        dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
        if (mlx4_is_slave(dev))
                return -EOPNOTSUPP;
 
+       if (!dev->caps.map_clock_to_user) {
+               mlx4_dbg(dev, "Map clock to user is not supported.\n");
+               return -EOPNOTSUPP;
+       }
+
        if (!params)
                return -EINVAL;
 
index a9166cd..ceebfc2 100644 (file)
@@ -303,6 +303,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
        int ret = 0, i;
 
        mutex_lock(&mlx5_intf_mutex);
+       priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
        for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
                if (!priv->adev[i]) {
                        bool is_supported = false;
@@ -320,6 +321,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
                        }
                } else {
                        adev = &priv->adev[i]->adev;
+
+                       /* Pay attention that this is not PCI driver that
+                        * mlx5_core_dev is connected, but auxiliary driver.
+                        *
+                        * Here we can race of module unload with devlink
+                        * reload, but we don't need to take extra lock because
+                        * we are holding global mlx5_intf_mutex.
+                        */
+                       if (!adev->dev.driver)
+                               continue;
                        adrv = to_auxiliary_drv(adev->dev.driver);
 
                        if (adrv->resume)
@@ -350,6 +361,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
                        continue;
 
                adev = &priv->adev[i]->adev;
+               /* Auxiliary driver was unbind manually through sysfs */
+               if (!adev->dev.driver)
+                       goto skip_suspend;
+
                adrv = to_auxiliary_drv(adev->dev.driver);
 
                if (adrv->suspend) {
@@ -357,9 +372,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
                        continue;
                }
 
+skip_suspend:
                del_adev(&priv->adev[i]->adev);
                priv->adev[i] = NULL;
        }
+       priv->flags |= MLX5_PRIV_FLAGS_DETACH;
        mutex_unlock(&mlx5_intf_mutex);
 }
 
@@ -448,6 +465,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
        struct mlx5_priv *priv = &dev->priv;
 
        lockdep_assert_held(&mlx5_intf_mutex);
+       if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
+               return 0;
 
        delete_drivers(dev);
        if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
index 0dd7615..bc33eaa 100644 (file)
@@ -64,6 +64,8 @@ struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct devlink_port *port;
 
+       if (!netif_device_present(dev))
+               return NULL;
        port = mlx5e_devlink_get_dl_port(priv);
        if (port->registered)
                return port;
index d907c1a..778e229 100644 (file)
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 // Copyright (c) 2020 Mellanox Technologies
 
-#include <linux/ptp_classify.h>
 #include "en/ptp.h"
 #include "en/txrx.h"
 #include "en/params.h"
index ab935cc..c96668b 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "en.h"
 #include "en_stats.h"
+#include <linux/ptp_classify.h>
 
 struct mlx5e_ptpsq {
        struct mlx5e_txqsq       txqsq;
@@ -43,6 +44,27 @@ struct mlx5e_ptp {
        DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
 };
 
+static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
+{
+       struct flow_keys fk;
+
+       if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+               return false;
+
+       if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+               return false;
+
+       if (fk.basic.n_proto == htons(ETH_P_1588))
+               return true;
+
+       if (fk.basic.n_proto != htons(ETH_P_IP) &&
+           fk.basic.n_proto != htons(ETH_P_IPV6))
+               return false;
+
+       return (fk.basic.ip_proto == IPPROTO_UDP &&
+               fk.ports.dst == htons(PTP_EV_PORT));
+}
+
 int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
                   u8 lag_port, struct mlx5e_ptp **cp);
 void mlx5e_ptp_close(struct mlx5e_ptp *c);
index be0ee03..2e9bee4 100644 (file)
@@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
                                                             work);
        struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
        struct neighbour *n = update_work->n;
+       struct mlx5e_encap_entry *e = NULL;
        bool neigh_connected, same_dev;
-       struct mlx5e_encap_entry *e;
        unsigned char ha[ETH_ALEN];
-       struct mlx5e_priv *priv;
        u8 nud_state, dead;
 
        rtnl_lock();
@@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
        if (!same_dev)
                goto out;
 
-       list_for_each_entry(e, &nhe->encap_list, encap_list) {
-               if (!mlx5e_encap_take(e))
-                       continue;
+       /* mlx5e_get_next_init_encap() releases previous encap before returning
+        * the next one.
+        */
+       while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
+               mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);
 
-               priv = netdev_priv(e->out_dev);
-               mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
-               mlx5e_encap_put(priv, e);
-       }
 out:
        rtnl_unlock();
        mlx5e_release_neigh_update_work(update_work);
index 3113822..85eaadc 100644 (file)
@@ -94,13 +94,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
 
        ASSERT_RTNL();
 
-       /* wait for encap to be fully initialized */
-       wait_for_completion(&e->res_ready);
-
        mutex_lock(&esw->offloads.encap_tbl_lock);
        encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
-       if (e->compl_result < 0 || (encap_connected == neigh_connected &&
-                                   ether_addr_equal(e->h_dest, ha)))
+       if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
                goto unlock;
 
        mlx5e_take_all_encap_flows(e, &flow_list);
index f1fb116..490131e 100644 (file)
@@ -251,9 +251,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
                mlx5e_take_tmp_flow(flow, flow_list, 0);
 }
 
+typedef bool (match_cb)(struct mlx5e_encap_entry *);
+
 static struct mlx5e_encap_entry *
-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
-                          struct mlx5e_encap_entry *e)
+mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
+                             struct mlx5e_encap_entry *e,
+                             match_cb match)
 {
        struct mlx5e_encap_entry *next = NULL;
 
@@ -288,7 +291,7 @@ retry:
        /* wait for encap to be fully initialized */
        wait_for_completion(&next->res_ready);
        /* continue searching if encap entry is not in valid state after completion */
-       if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+       if (!match(next)) {
                e = next;
                goto retry;
        }
@@ -296,6 +299,30 @@ retry:
        return next;
 }
 
+static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
+{
+       return e->flags & MLX5_ENCAP_ENTRY_VALID;
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+                          struct mlx5e_encap_entry *e)
+{
+       return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
+}
+
+static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
+{
+       return e->compl_result >= 0;
+}
+
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+                         struct mlx5e_encap_entry *e)
+{
+       return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
+}
+
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 {
        struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
index 3d45341..26f7fab 100644 (file)
@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
        struct mlx5_core_dev *mdev = priv->mdev;
        struct net_device *netdev = priv->netdev;
 
-       if (!priv->ipsec)
-               return;
-
        if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
            !MLX5_CAP_ETH(mdev, swp)) {
                mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
index 5cd466e..25403af 100644 (file)
@@ -356,7 +356,7 @@ err:
 
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
 {
-       int err = 0;
+       int err = -ENOMEM;
        int i;
 
        if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
index 8360289..d6513ae 100644 (file)
@@ -1624,12 +1624,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       unsigned long fec_bitmap;
        u16 fec_policy = 0;
        int mode;
        int err;
 
-       if (bitmap_weight((unsigned long *)&fecparam->fec,
-                         ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+       bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
+       if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
                return -EOPNOTSUPP;
 
        for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
@@ -1893,6 +1894,13 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
        if (curr_val == new_val)
                return 0;
 
+       if (new_val && !priv->profile->rx_ptp_support &&
+           priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+               netdev_err(priv->netdev,
+                          "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
+               return -EINVAL;
+       }
+
        new_params = priv->channels.params;
        MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
        if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
index ad0f694..d26b8ed 100644 (file)
@@ -2705,8 +2705,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
        nch = priv->channels.params.num_channels;
        ntc = priv->channels.params.num_tc;
        num_rxqs = nch * priv->profile->rq_groups;
-       if (priv->channels.params.ptp_rx)
-               num_rxqs++;
 
        mlx5e_netdev_set_tcs(netdev, nch, ntc);
 
@@ -3858,6 +3856,16 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
                        netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
        }
 
+       if (mlx5e_is_uplink_rep(priv)) {
+               features &= ~NETIF_F_HW_TLS_RX;
+               if (netdev->features & NETIF_F_HW_TLS_RX)
+                       netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+               features &= ~NETIF_F_HW_TLS_TX;
+               if (netdev->features & NETIF_F_HW_TLS_TX)
+                       netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+       }
+
        mutex_unlock(&priv->state_lock);
 
        return features;
@@ -3974,11 +3982,45 @@ int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
        return mlx5e_ptp_rx_manage_fs(priv, set);
 }
 
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
+{
+       bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
+       int err;
+
+       if (!rx_filter)
+               /* Reset CQE compression to Admin default */
+               return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+
+       if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+               return 0;
+
+       /* Disable CQE compression */
+       netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+       if (err)
+               netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+
+       return err;
+}
+
+static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
 {
        struct mlx5e_params new_params;
+
+       if (ptp_rx == priv->channels.params.ptp_rx)
+               return 0;
+
+       new_params = priv->channels.params;
+       new_params.ptp_rx = ptp_rx;
+       return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+                                       &new_params.ptp_rx, true);
+}
+
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
        struct hwtstamp_config config;
        bool rx_cqe_compress_def;
+       bool ptp_rx;
        int err;
 
        if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
@@ -3998,13 +4040,12 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
        }
 
        mutex_lock(&priv->state_lock);
-       new_params = priv->channels.params;
        rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
 
        /* RX HW timestamp */
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
-               new_params.ptp_rx = false;
+               ptp_rx = false;
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
@@ -4021,24 +4062,25 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
        case HWTSTAMP_FILTER_NTP_ALL:
-               new_params.ptp_rx = rx_cqe_compress_def;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
+               /* ptp_rx is set if both HW TS is set and CQE
+                * compression is set
+                */
+               ptp_rx = rx_cqe_compress_def;
                break;
        default:
-               mutex_unlock(&priv->state_lock);
-               return -ERANGE;
+               err = -ERANGE;
+               goto err_unlock;
        }
 
-       if (new_params.ptp_rx == priv->channels.params.ptp_rx)
-               goto out;
+       if (!priv->profile->rx_ptp_support)
+               err = mlx5e_hwstamp_config_no_ptp_rx(priv,
+                                                    config.rx_filter != HWTSTAMP_FILTER_NONE);
+       else
+               err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
+       if (err)
+               goto err_unlock;
 
-       err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
-                                      &new_params.ptp_rx, true);
-       if (err) {
-               mutex_unlock(&priv->state_lock);
-               return err;
-       }
-out:
        memcpy(&priv->tstamp, &config, sizeof(config));
        mutex_unlock(&priv->state_lock);
 
@@ -4047,6 +4089,9 @@ out:
 
        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(config)) ? -EFAULT : 0;
+err_unlock:
+       mutex_unlock(&priv->state_lock);
+       return err;
 }
 
 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -4777,22 +4822,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        }
 
        if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
-               netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
-                                          NETIF_F_GSO_UDP_TUNNEL_CSUM;
-               netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
-                                          NETIF_F_GSO_UDP_TUNNEL_CSUM;
-               netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
-               netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
-                                        NETIF_F_GSO_UDP_TUNNEL_CSUM;
+               netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
        }
 
        if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
-               netdev->hw_features     |= NETIF_F_GSO_GRE |
-                                          NETIF_F_GSO_GRE_CSUM;
-               netdev->hw_enc_features |= NETIF_F_GSO_GRE |
-                                          NETIF_F_GSO_GRE_CSUM;
-               netdev->gso_partial_features |= NETIF_F_GSO_GRE |
-                                               NETIF_F_GSO_GRE_CSUM;
+               netdev->hw_features     |= NETIF_F_GSO_GRE;
+               netdev->hw_enc_features |= NETIF_F_GSO_GRE;
+               netdev->gso_partial_features |= NETIF_F_GSO_GRE;
        }
 
        if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
index 2c776e7..d4b0f27 100644 (file)
@@ -2015,11 +2015,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                    misc_parameters_3);
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_dissector *dissector = rule->match.dissector;
+       enum fs_flow_table_type fs_type;
        u16 addr_type = 0;
        u8 ip_proto = 0;
        u8 *match_level;
        int err;
 
+       fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
        match_level = outer_match_level;
 
        if (dissector->used_keys &
@@ -2145,6 +2147,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                if (match.mask->vlan_id ||
                    match.mask->vlan_priority ||
                    match.mask->vlan_tpid) {
+                       if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
+                                                    fs_type)) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Matching on CVLAN is not supported");
+                               return -EOPNOTSUPP;
+                       }
+
                        if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
                                MLX5_SET(fte_match_set_misc, misc_c,
                                         outer_second_svlan_tag, 1);
@@ -4756,7 +4765,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
        list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
                wait_for_completion(&hpe->res_ready);
                if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
-                       hpe->hp->pair->peer_gone = true;
+                       mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
 
                mlx5e_hairpin_put(priv, hpe);
        }
index 25c0917..1702753 100644 (file)
@@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
 void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
 
 struct mlx5e_neigh_hash_entry;
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+                         struct mlx5e_encap_entry *e);
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
 
 void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
index 8ba6267..320fe0c 100644 (file)
@@ -32,7 +32,6 @@
 
 #include <linux/tcp.h>
 #include <linux/if_vlan.h>
-#include <linux/ptp_classify.h>
 #include <net/geneve.h>
 #include <net/dsfield.h>
 #include "en.h"
@@ -67,24 +66,6 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
 }
 #endif
 
-static bool mlx5e_use_ptpsq(struct sk_buff *skb)
-{
-       struct flow_keys fk;
-
-       if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
-               return false;
-
-       if (fk.basic.n_proto == htons(ETH_P_1588))
-               return true;
-
-       if (fk.basic.n_proto != htons(ETH_P_IP) &&
-           fk.basic.n_proto != htons(ETH_P_IPV6))
-               return false;
-
-       return (fk.basic.ip_proto == IPPROTO_UDP &&
-               fk.ports.dst == htons(PTP_EV_PORT));
-}
-
 static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
@@ -145,9 +126,9 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                }
 
                ptp_channel = READ_ONCE(priv->channels.ptp);
-               if (unlikely(ptp_channel) &&
-                   test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
-                   mlx5e_use_ptpsq(skb))
+               if (unlikely(ptp_channel &&
+                            test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
+                            mlx5e_use_ptpsq(skb)))
                        return mlx5e_select_ptpsq(dev, skb);
 
                txq_ix = netdev_pick_tx(dev, skb, NULL);
index 77c0ca6..9403334 100644 (file)
@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
 
        eqe = next_eqe_sw(eq);
        if (!eqe)
-               return 0;
+               goto out;
 
        do {
                struct mlx5_core_cq *cq;
@@ -161,6 +161,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
                ++eq->cons_index;
 
        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+
+out:
        eq_update_ci(eq, 1);
 
        if (cqn != -1)
@@ -248,9 +250,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
                ++eq->cons_index;
 
        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
-       eq_update_ci(eq, 1);
 
 out:
+       eq_update_ci(eq, 1);
        mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
 
        return unlikely(recovery) ? num_eqes : 0;
index b88705a..97e6cb6 100644 (file)
@@ -1054,6 +1054,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
                        goto err_vhca_mapping;
        }
 
+       /* External controller host PF has factory programmed MAC.
+        * Read it from the device.
+        */
+       if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
+               mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
+
        esw_vport_change_handle_locked(vport);
 
        esw->enabled_vports++;
index db1e742..d18a28a 100644 (file)
@@ -219,7 +219,8 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
                         struct mlx5_fs_chains *chains,
                         int i)
 {
-       flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+       if (mlx5_chains_ignore_flow_level_supported(chains))
+               flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
 }
index d5d5763..106b50e 100644 (file)
@@ -349,6 +349,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
                                                      reset_abort_work);
        struct mlx5_core_dev *dev = fw_reset->dev;
 
+       if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+               return;
+
        mlx5_sync_reset_clear_reset_requested(dev, true);
        mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
 }
index 00ef10a..20a4047 100644 (file)
@@ -107,7 +107,7 @@ bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
        return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
 }
 
-static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
 {
        return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
 }
index e96f345..d50bdb2 100644 (file)
@@ -28,6 +28,7 @@ struct mlx5_chains_attr {
 
 bool
 mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains);
 bool
 mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
 u32
@@ -70,6 +71,10 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
 
 #else /* CONFIG_MLX5_CLS_ACT */
 
+static inline bool
+mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{ return false; }
+
 static inline struct mlx5_flow_table *
 mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
                      u32 level) { return ERR_PTR(-EOPNOTSUPP); }
index a1d67bd..0d0f63a 100644 (file)
@@ -1161,7 +1161,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
        err = mlx5_core_set_hca_defaults(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to set hca defaults\n");
-               goto err_sriov;
+               goto err_set_hca;
        }
 
        mlx5_vhca_event_start(dev);
@@ -1194,6 +1194,7 @@ err_ec:
        mlx5_sf_hw_table_destroy(dev);
 err_vhca:
        mlx5_vhca_event_stop(dev);
+err_set_hca:
        mlx5_cleanup_fs(dev);
 err_fs:
        mlx5_accel_tls_cleanup(dev);
index 50af84e..174f71e 100644 (file)
@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
        mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
        mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
        mkey->size = MLX5_GET64(mkc, mkc, len);
-       mkey->key |= mlx5_idx_to_mkey(mkey_index);
+       mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
        mkey->pd = MLX5_GET(mkc, mkc, pd);
        init_waitqueue_head(&mkey->wait);
 
index 441b545..540cf05 100644 (file)
@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
 {
        int err;
 
+       if (!MLX5_CAP_GEN(dev, roce))
+               return;
+
        err = mlx5_nic_vport_enable_roce(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
index 6a0c6f9..fa0288a 100644 (file)
@@ -163,6 +163,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
        sf_index = event->function_id - base_id;
        sf_dev = xa_load(&table->devices, sf_index);
        switch (event->new_vhca_state) {
+       case MLX5_VHCA_STATE_INVALID:
        case MLX5_VHCA_STATE_ALLOCATED:
                if (sf_dev)
                        mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
index 1fbcd01..7ccfd40 100644 (file)
@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
        int ret;
 
        ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
-       ft_attr.level = dmn->info.caps.max_ft_level - 2;
+       ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
+                             MLX5_FT_MAX_MULTIPATH_LEVEL);
        ft_attr.reformat_en = reformat_req;
        ft_attr.decap_en = reformat_req;
 
index 054c2e2..7466f01 100644 (file)
@@ -694,7 +694,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
        if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
                return -EINVAL;
 
-       memcpy(padded_data, data, data_sz);
+       inline_data_sz =
+               MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+
+       /* Add an alignment padding  */
+       memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
 
        /* Remove L2L3 outer headers */
        MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
@@ -706,32 +710,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
        hw_action += DR_STE_ACTION_DOUBLE_SZ;
        used_actions++; /* Remove and NOP are a single double action */
 
-       inline_data_sz =
-               MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+       /* Point to the last dword of the header */
+       data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
 
-       /* Add the new header inline + 2 extra bytes */
+       /* Add the new header using inline action 4Byte at a time, the header
+        * is added in reversed order to the beginning of the packet to avoid
+        * incorrect parsing by the HW. Since header is 14B or 18B an extra
+        * two bytes are padded and later removed.
+        */
        for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
                void *addr_inline;
 
                MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
                         DR_STE_V1_ACTION_ID_INSERT_INLINE);
                /* The hardware expects here offset to words (2 bytes) */
-               MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
-                        i * 2);
+               MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
 
                /* Copy bytes one by one to avoid endianness problem */
                addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
                                           hw_action, inline_data);
-               memcpy(addr_inline, data_ptr, inline_data_sz);
+               memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
                hw_action += DR_STE_ACTION_DOUBLE_SZ;
-               data_ptr += inline_data_sz;
                used_actions++;
        }
 
-       /* Remove 2 extra bytes */
+       /* Remove first 2 extra bytes */
        MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
                 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
-       MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
+       MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
        /* The hardware expects here size in words (2 bytes) */
        MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
        used_actions++;
index 612b0ac..9737565 100644 (file)
@@ -124,10 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
 static inline bool
 mlx5dr_is_supported(struct mlx5_core_dev *dev)
 {
-       return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
-              (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
-               (MLX5_CAP_GEN(dev, steering_format_version) <=
-                MLX5_STEERING_FORMAT_CONNECTX_6DX));
+       return MLX5_CAP_GEN(dev, roce) &&
+              (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+               (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+                (MLX5_CAP_GEN(dev, steering_format_version) <=
+                 MLX5_STEERING_FORMAT_CONNECTX_6DX)));
 }
 
 /* buddy functions & structure */
index 01cc00a..b6931bb 100644 (file)
@@ -424,6 +424,15 @@ err_modify_sq:
        return err;
 }
 
+static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
+{
+       int i;
+
+       for (i = 0; i < hp->num_channels; i++)
+               mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+                                      MLX5_SQC_STATE_RST, 0, 0);
+}
+
 static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
 {
        int i;
@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
        for (i = 0; i < hp->num_channels; i++)
                mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
                                       MLX5_RQC_STATE_RST, 0, 0);
-
        /* unset peer SQs */
-       if (hp->peer_gone)
-               return;
-       for (i = 0; i < hp->num_channels; i++)
-               mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
-                                      MLX5_SQC_STATE_RST, 0, 0);
+       if (!hp->peer_gone)
+               mlx5_hairpin_unpair_peer_sq(hp);
 }
 
 struct mlx5_hairpin *
@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
        mlx5_hairpin_destroy_queues(hp);
        kfree(hp);
 }
+
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
+{
+       int i;
+
+       mlx5_hairpin_unpair_peer_sq(hp);
+
+       /* destroy peer SQ */
+       for (i = 0; i < hp->num_channels; i++)
+               mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+
+       hp->peer_gone = true;
+}
index 457ad42..4c1440a 100644 (file)
@@ -465,8 +465,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
        void *in;
        int err;
 
-       if (!vport)
-               return -EINVAL;
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
 
index dfea143..85f0ce2 100644 (file)
@@ -693,7 +693,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
                                                        MLXSW_THERMAL_TRIP_MASK,
                                                        module_tz,
                                                        &mlxsw_thermal_module_ops,
-                                                       NULL, 0, 0);
+                                                       NULL, 0,
+                                                       module_tz->parent->polling_delay);
        if (IS_ERR(module_tz->tzdev)) {
                err = PTR_ERR(module_tz->tzdev);
                return err;
@@ -815,7 +816,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
                                                MLXSW_THERMAL_TRIP_MASK,
                                                gearbox_tz,
                                                &mlxsw_thermal_gearbox_ops,
-                                               NULL, 0, 0);
+                                               NULL, 0,
+                                               gearbox_tz->parent->polling_delay);
        if (IS_ERR(gearbox_tz->tzdev))
                return PTR_ERR(gearbox_tz->tzdev);
 
index 900b4bf..2bc5a90 100644 (file)
@@ -3907,7 +3907,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
 #define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS       25
 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1    5
 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2    11
-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3    5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3    11
 
 static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
                                       enum mlxsw_reg_qeec_hr hr, u8 index,
index 04672eb..9958d50 100644 (file)
@@ -1332,6 +1332,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
                           u8 band, u32 child_handle)
 {
        struct mlxsw_sp_qdisc *old_qdisc;
+       u32 parent;
 
        if (band < mlxsw_sp_qdisc->num_classes &&
            mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
@@ -1352,7 +1353,9 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
        if (old_qdisc)
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
 
-       mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band);
+       parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
+       mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
+                                                        parent);
        if (!WARN_ON(!mlxsw_sp_qdisc))
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
 
index 0c42833..adfb978 100644 (file)
@@ -379,6 +379,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
 
 int ocelot_port_flush(struct ocelot *ocelot, int port)
 {
+       unsigned int pause_ena;
        int err, val;
 
        /* Disable dequeuing from the egress queues */
@@ -387,6 +388,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
                       QSYS_PORT_MODE, port);
 
        /* Disable flow control */
+       ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
        ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
 
        /* Disable priority flow control */
@@ -422,6 +424,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
        /* Clear flushing again. */
        ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
 
+       /* Re-enable flow control */
+       ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
+
        return err;
 }
 EXPORT_SYMBOL(ocelot_port_flush);
index c84c8bf..fc99ad8 100644 (file)
@@ -3815,6 +3815,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_err(&pdev->dev,
                        "invalid sram_size %dB or board span %ldB\n",
                        mgp->sram_size, mgp->board_span);
+               status = -EINVAL;
                goto abort_with_ioremap;
        }
        memcpy_fromio(mgp->eeprom_strings,
index 7e6bac8..344ea11 100644 (file)
@@ -1602,6 +1602,8 @@ err_out_free_netdev:
        free_netdev(netdev);
 
 err_out_free_res:
+       if (NX_IS_REVISION_P3(pdev->revision))
+               pci_disable_pcie_error_reporting(pdev);
        pci_release_regions(pdev);
 
 err_out_disable_pdev:
index 17d5b64..e81dd34 100644 (file)
@@ -1266,9 +1266,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
                p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
 
        p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+       BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
+                    sizeof(p_hwfn->p_dcbx_info->set.config.params));
        memcpy(&p_hwfn->p_dcbx_info->set.config.params,
               &dcbx_info->operational.params,
-              sizeof(struct qed_dcbx_admin_params));
+              sizeof(p_hwfn->p_dcbx_info->set.config.params));
        p_hwfn->p_dcbx_info->set.config.valid = true;
 
        memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
index 96b947f..3beafc6 100644 (file)
@@ -2690,6 +2690,7 @@ err_out_free_hw_res:
        kfree(ahw);
 
 err_out_free_res:
+       pci_disable_pcie_error_reporting(pdev);
        pci_release_regions(pdev);
 
 err_out_disable_pdev:
index 41fbd2c..ab1e0fc 100644 (file)
@@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
                              struct rtnl_link_stats64 *s)
 {
        struct rmnet_priv *priv = netdev_priv(dev);
-       struct rmnet_vnd_stats total_stats;
+       struct rmnet_vnd_stats total_stats = { };
        struct rmnet_pcpu_stats *pcpu_ptr;
+       struct rmnet_vnd_stats snapshot;
        unsigned int cpu, start;
 
-       memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
-
        for_each_possible_cpu(cpu) {
                pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
 
                do {
                        start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
-                       total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
-                       total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
-                       total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
-                       total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+                       snapshot = pcpu_ptr->stats;     /* struct assignment */
                } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
 
-               total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+               total_stats.rx_pkts += snapshot.rx_pkts;
+               total_stats.rx_bytes += snapshot.rx_bytes;
+               total_stats.tx_pkts += snapshot.tx_pkts;
+               total_stats.tx_bytes += snapshot.tx_bytes;
+               total_stats.tx_drops += snapshot.tx_drops;
        }
 
        s->rx_packets = total_stats.rx_pkts;
@@ -354,4 +354,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
        }
 
        return 0;
-}
\ No newline at end of file
+}
index 2c89cde..2ee72dc 100644 (file)
@@ -1671,7 +1671,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
        switch(stringset) {
        case ETH_SS_STATS:
-               memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
+               memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
                break;
        }
 }
index c5b1548..713d362 100644 (file)
@@ -2287,7 +2287,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 {
        switch (stringset) {
        case ETH_SS_STATS:
-               memcpy(data, *sh_eth_gstrings_stats,
+               memcpy(data, sh_eth_gstrings_stats,
                       sizeof(sh_eth_gstrings_stats));
                break;
        }
index b70d44a..3c73453 100644 (file)
@@ -76,10 +76,10 @@ enum power_event {
 #define LPI_CTRL_STATUS_TLPIEN 0x00000001      /* Transmit LPI Entry */
 
 /* GMAC HW ADDR regs */
-#define GMAC_ADDR_HIGH(reg)    (((reg > 15) ? 0x00000800 : 0x00000040) + \
-                               (reg * 8))
-#define GMAC_ADDR_LOW(reg)     (((reg > 15) ? 0x00000804 : 0x00000044) + \
-                               (reg * 8))
+#define GMAC_ADDR_HIGH(reg)    ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
+                                0x00000040 + (reg * 8))
+#define GMAC_ADDR_LOW(reg)     ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
+                                0x00000044 + (reg * 8))
 #define GMAC_MAX_PERFECT_ADDRESSES     1
 
 #define GMAC_PCS_BASE          0x000000c0      /* PCS register base */
index 5d956a5..c87202c 100644 (file)
@@ -1240,8 +1240,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
        priv->phylink_config.dev = &priv->dev->dev;
        priv->phylink_config.type = PHYLINK_NETDEV;
        priv->phylink_config.pcs_poll = true;
-       priv->phylink_config.ovr_an_inband =
-               priv->plat->mdio_bus_data->xpcs_an_inband;
+       if (priv->plat->mdio_bus_data)
+               priv->phylink_config.ovr_an_inband =
+                       priv->plat->mdio_bus_data->xpcs_an_inband;
 
        if (!fwnode)
                fwnode = dev_fwnode(priv->device);
@@ -7048,7 +7049,6 @@ error_mdio_register:
        stmmac_napi_del(ndev);
 error_hw_init:
        destroy_workqueue(priv->wq);
-       stmmac_bus_clks_config(priv, false);
        bitmap_free(priv->af_xdp_zc_qps);
 
        return ret;
index 1e17a23..a696ada 100644 (file)
@@ -622,6 +622,8 @@ error_pclk_get:
 void stmmac_remove_config_dt(struct platform_device *pdev,
                             struct plat_stmmacenet_data *plat)
 {
+       clk_disable_unprepare(plat->stmmac_clk);
+       clk_disable_unprepare(plat->pclk);
        of_node_put(plat->phy_node);
        of_node_put(plat->mdio_node);
 }
index a1f5f07..9a13953 100644 (file)
@@ -774,12 +774,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
        stat = be32_to_cpu(cur_p->app0);
 
        while (stat & STS_CTRL_APP0_CMPLT) {
+               /* Make sure that the other fields are read after bd is
+                * released by dma
+                */
+               rmb();
                dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
                                 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
                skb = (struct sk_buff *)ptr_from_txbd(cur_p);
                if (skb)
                        dev_consume_skb_irq(skb);
-               cur_p->app0 = 0;
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app3 = 0;
@@ -788,6 +791,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
 
+               /* app0 must be visible last, as it is used to flag
+                * availability of the bd
+                */
+               smp_mb();
+               cur_p->app0 = 0;
+
                lp->tx_bd_ci++;
                if (lp->tx_bd_ci >= lp->tx_bd_num)
                        lp->tx_bd_ci = 0;
@@ -814,6 +823,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
                if (cur_p->app0)
                        return NETDEV_TX_BUSY;
 
+               /* Make sure to read next bd app0 after this one */
+               rmb();
+
                tail++;
                if (tail >= lp->tx_bd_num)
                        tail = 0;
@@ -849,7 +861,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                smp_mb();
 
                /* Space might have just been freed - check again */
-               if (temac_check_tx_bd_space(lp, num_frag))
+               if (temac_check_tx_bd_space(lp, num_frag + 1))
                        return NETDEV_TX_BUSY;
 
                netif_wake_queue(ndev);
@@ -876,7 +888,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                return NETDEV_TX_OK;
        }
        cur_p->phys = cpu_to_be32(skb_dma_addr);
-       ptr_to_txbd((void *)skb, cur_p);
 
        for (ii = 0; ii < num_frag; ii++) {
                if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -915,6 +926,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
 
+       /* Mark last fragment with skb address, so it can be consumed
+        * in temac_start_xmit_done()
+        */
+       ptr_to_txbd((void *)skb, cur_p);
+
        tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
        lp->tx_bd_tail++;
        if (lp->tx_bd_tail >= lp->tx_bd_num)
@@ -926,6 +942,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        wmb();
        lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
 
+       if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+               netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
+               netif_stop_queue(ndev);
+       }
+
        return NETDEV_TX_OK;
 }
 
index 6515422..7685a17 100644 (file)
@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
        ax->tty = NULL;
 
        unregister_netdev(ax->dev);
+       free_netdev(ax->dev);
 }
 
 /* Perform I/O control on an active ax25 channel. */
index b9be530..ff83e00 100644 (file)
@@ -8,8 +8,8 @@
 
 #include <linux/spi/spi.h>
 #include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/ieee802154.h>
 #include <linux/irq.h>
@@ -1388,7 +1388,7 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
 
 static struct spi_driver mrf24j40_driver = {
        .driver = {
-               .of_match_table = of_match_ptr(mrf24j40_of_match),
+               .of_match_table = mrf24j40_of_match,
                .name = "mrf24j40",
        },
        .id_table = mrf24j40_ids,
index 0d8293a..b806f2f 100644 (file)
@@ -49,7 +49,7 @@ static int mhi_ndo_stop(struct net_device *ndev)
        return 0;
 }
 
-static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
        const struct mhi_net_proto *proto = mhi_netdev->proto;
index 9bd9a5c..6bbc81a 100644 (file)
@@ -826,16 +826,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
 {
        int err;
 
-       err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+       err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
        if (err < 0)
                return err;
 
        usleep_range(10, 20);
 
-       /* After reset FORCE_LINK_GOOD bit is set. Although the
-        * default value should be unset. Disable FORCE_LINK_GOOD
-        * for the phy to work properly.
-        */
        return phy_modify(phydev, MII_DP83867_PHYCTRL,
                         DP83867_PHYCR_FORCE_LINK_GOOD, 0);
 }
index 2e60bc1..359ea0d 100644 (file)
@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
        }
 
        skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
+       dev_kfree_skb_any(skb);
        if (!skb2)
                return NULL;
 
-       dev_kfree_skb_any(skb);
        skb = skb2;
 
 done:
index b04055f..df0d183 100644 (file)
@@ -1880,7 +1880,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
 static const struct driver_info cdc_ncm_info = {
        .description = "CDC NCM",
        .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
-                       | FLAG_LINK_INTR,
+                       | FLAG_LINK_INTR | FLAG_ETHER,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
        .manage_power = usbnet_manage_power,
index 6700f19..bc55ec7 100644 (file)
@@ -575,7 +575,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 
        if (info->flags & QMI_WWAN_FLAG_PASS_THROUGH) {
                skb->protocol = htons(ETH_P_MAP);
-               return (netif_rx(skb) == NET_RX_SUCCESS);
+               return 1;
        }
 
        switch (skb->data[0] & 0xf0) {
index f6abb2f..e25bfb7 100644 (file)
@@ -8678,7 +8678,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
        switch (stringset) {
        case ETH_SS_STATS:
-               memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
+               memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
                break;
        }
 }
index b286993..13141db 100644 (file)
@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
        ret = smsc75xx_wait_ready(dev, 0);
        if (ret < 0) {
                netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
-               goto err;
+               goto free_pdata;
        }
 
        smsc75xx_init_mac_address(dev);
@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
        ret = smsc75xx_reset(dev);
        if (ret < 0) {
                netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
-               goto err;
+               goto cancel_work;
        }
 
        dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1503,8 +1503,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
        return 0;
 
-err:
+cancel_work:
+       cancel_work_sync(&pdata->set_multicast);
+free_pdata:
        kfree(pdata);
+       dev->data[0] = 0;
        return ret;
 }
 
@@ -1515,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
                cancel_work_sync(&pdata->set_multicast);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
-               pdata = NULL;
                dev->data[0] = 0;
        }
 }
index 9b6a4a8..78a01c7 100644 (file)
@@ -401,18 +401,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        /* If headroom is not 0, there is an offset between the beginning of the
         * data and the allocated space, otherwise the data and the allocated
         * space are aligned.
+        *
+        * Buffers with headroom use PAGE_SIZE as alloc size, see
+        * add_recvbuf_mergeable() + get_mergeable_buf_len()
         */
-       if (headroom) {
-               /* Buffers with headroom use PAGE_SIZE as alloc size,
-                * see add_recvbuf_mergeable() + get_mergeable_buf_len()
-                */
-               truesize = PAGE_SIZE;
-               tailroom = truesize - len - offset;
-               buf = page_address(page);
-       } else {
-               tailroom = truesize - len;
-               buf = p;
-       }
+       truesize = headroom ? PAGE_SIZE : truesize;
+       tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len);
+       buf = p - headroom;
 
        len -= hdr_len;
        offset += hdr_padded_len;
@@ -958,7 +953,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                put_page(page);
                                head_skb = page_to_skb(vi, rq, xdp_page, offset,
                                                       len, PAGE_SIZE, false,
-                                                      metasize, headroom);
+                                                      metasize,
+                                                      VIRTIO_XDP_HEADROOM);
                                return head_skb;
                        }
                        break;
index 503e2fd..28a6c4c 100644 (file)
@@ -1183,9 +1183,6 @@ static int vrf_dev_init(struct net_device *dev)
 
        dev->flags = IFF_MASTER | IFF_NOARP;
 
-       /* MTU is irrelevant for VRF device; set to 64k similar to lo */
-       dev->mtu = 64 * 1024;
-
        /* similarly, oper state is irrelevant; set to up to avoid confusion */
        dev->operstate = IF_OPER_UP;
        netdev_lockdep_set_classes(dev);
@@ -1685,7 +1682,8 @@ static void vrf_setup(struct net_device *dev)
         * which breaks networking.
         */
        dev->min_mtu = IPV6_MIN_MTU;
-       dev->max_mtu = ETH_MAX_MTU;
+       dev->max_mtu = IP6_MAX_MTU;
+       dev->mtu = dev->max_mtu;
 }
 
 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
index fc52b2c..dbe1f85 100644 (file)
@@ -1,5 +1,4 @@
-ccflags-y := -O3
-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
 ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
 wireguard-y := main.o
 wireguard-y += noise.o
index 3725e9c..b7197e8 100644 (file)
@@ -6,6 +6,8 @@
 #include "allowedips.h"
 #include "peer.h"
 
+static struct kmem_cache *node_cache;
+
 static void swap_endian(u8 *dst, const u8 *src, u8 bits)
 {
        if (bits == 32) {
@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
        node->bitlen = bits;
        memcpy(node->bits, src, bits / 8U);
 }
-#define CHOOSE_NODE(parent, key) \
-       parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
+
+static inline u8 choose(struct allowedips_node *node, const u8 *key)
+{
+       return (key[node->bit_at_a] >> node->bit_at_b) & 1;
+}
 
 static void push_rcu(struct allowedips_node **stack,
                     struct allowedips_node __rcu *p, unsigned int *len)
@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack,
        }
 }
 
+static void node_free_rcu(struct rcu_head *rcu)
+{
+       kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu));
+}
+
 static void root_free_rcu(struct rcu_head *rcu)
 {
        struct allowedips_node *node, *stack[128] = {
@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu)
        while (len > 0 && (node = stack[--len])) {
                push_rcu(stack, node->bit[0], &len);
                push_rcu(stack, node->bit[1], &len);
-               kfree(node);
+               kmem_cache_free(node_cache, node);
        }
 }
 
@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root)
        }
 }
 
-static void walk_remove_by_peer(struct allowedips_node __rcu **top,
-                               struct wg_peer *peer, struct mutex *lock)
-{
-#define REF(p) rcu_access_pointer(p)
-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
-#define PUSH(p) ({                                                             \
-               WARN_ON(IS_ENABLED(DEBUG) && len >= 128);                      \
-               stack[len++] = p;                                              \
-       })
-
-       struct allowedips_node __rcu **stack[128], **nptr;
-       struct allowedips_node *node, *prev;
-       unsigned int len;
-
-       if (unlikely(!peer || !REF(*top)))
-               return;
-
-       for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
-               nptr = stack[len - 1];
-               node = DEREF(nptr);
-               if (!node) {
-                       --len;
-                       continue;
-               }
-               if (!prev || REF(prev->bit[0]) == node ||
-                   REF(prev->bit[1]) == node) {
-                       if (REF(node->bit[0]))
-                               PUSH(&node->bit[0]);
-                       else if (REF(node->bit[1]))
-                               PUSH(&node->bit[1]);
-               } else if (REF(node->bit[0]) == prev) {
-                       if (REF(node->bit[1]))
-                               PUSH(&node->bit[1]);
-               } else {
-                       if (rcu_dereference_protected(node->peer,
-                               lockdep_is_held(lock)) == peer) {
-                               RCU_INIT_POINTER(node->peer, NULL);
-                               list_del_init(&node->peer_list);
-                               if (!node->bit[0] || !node->bit[1]) {
-                                       rcu_assign_pointer(*nptr, DEREF(
-                                              &node->bit[!REF(node->bit[0])]));
-                                       kfree_rcu(node, rcu);
-                                       node = DEREF(nptr);
-                               }
-                       }
-                       --len;
-               }
-       }
-
-#undef REF
-#undef DEREF
-#undef PUSH
-}
-
 static unsigned int fls128(u64 a, u64 b)
 {
        return a ? fls64(a) + 64U : fls64(b);
@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
                        found = node;
                if (node->cidr == bits)
                        break;
-               node = rcu_dereference_bh(CHOOSE_NODE(node, key));
+               node = rcu_dereference_bh(node->bit[choose(node, key)]);
        }
        return found;
 }
@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
                           u8 cidr, u8 bits, struct allowedips_node **rnode,
                           struct mutex *lock)
 {
-       struct allowedips_node *node = rcu_dereference_protected(trie,
-                                               lockdep_is_held(lock));
+       struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
        struct allowedips_node *parent = NULL;
        bool exact = false;
 
@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
                        exact = true;
                        break;
                }
-               node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
-                                                lockdep_is_held(lock));
+               node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
        }
        *rnode = parent;
        return exact;
 }
 
+static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+{
+       node->parent_bit_packed = (unsigned long)parent | bit;
+       rcu_assign_pointer(*parent, node);
+}
+
+static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
+{
+       u8 bit = choose(parent, node->bits);
+       connect_node(&parent->bit[bit], bit, node);
+}
+
 static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
               u8 cidr, struct wg_peer *peer, struct mutex *lock)
 {
@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
                return -EINVAL;
 
        if (!rcu_access_pointer(*trie)) {
-               node = kzalloc(sizeof(*node), GFP_KERNEL);
+               node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
                if (unlikely(!node))
                        return -ENOMEM;
                RCU_INIT_POINTER(node->peer, peer);
                list_add_tail(&node->peer_list, &peer->allowedips_list);
                copy_and_assign_cidr(node, key, cidr, bits);
-               rcu_assign_pointer(*trie, node);
+               connect_node(trie, 2, node);
                return 0;
        }
        if (node_placement(*trie, key, cidr, bits, &node, lock)) {
@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
                return 0;
        }
 
-       newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+       newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL);
        if (unlikely(!newnode))
                return -ENOMEM;
        RCU_INIT_POINTER(newnode->peer, peer);
@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
        if (!node) {
                down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
        } else {
-               down = rcu_dereference_protected(CHOOSE_NODE(node, key),
-                                                lockdep_is_held(lock));
+               const u8 bit = choose(node, key);
+               down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
                if (!down) {
-                       rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
+                       connect_node(&node->bit[bit], bit, newnode);
                        return 0;
                }
        }
@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
        parent = node;
 
        if (newnode->cidr == cidr) {
-               rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
+               choose_and_connect_node(newnode, down);
                if (!parent)
-                       rcu_assign_pointer(*trie, newnode);
+                       connect_node(trie, 2, newnode);
                else
-                       rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
-                                          newnode);
-       } else {
-               node = kzalloc(sizeof(*node), GFP_KERNEL);
-               if (unlikely(!node)) {
-                       list_del(&newnode->peer_list);
-                       kfree(newnode);
-                       return -ENOMEM;
-               }
-               INIT_LIST_HEAD(&node->peer_list);
-               copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+                       choose_and_connect_node(parent, newnode);
+               return 0;
+       }
 
-               rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
-               rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
-               if (!parent)
-                       rcu_assign_pointer(*trie, node);
-               else
-                       rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
-                                          node);
+       node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
+       if (unlikely(!node)) {
+               list_del(&newnode->peer_list);
+               kmem_cache_free(node_cache, newnode);
+               return -ENOMEM;
        }
+       INIT_LIST_HEAD(&node->peer_list);
+       copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+
+       choose_and_connect_node(node, down);
+       choose_and_connect_node(node, newnode);
+       if (!parent)
+               connect_node(trie, 2, node);
+       else
+               choose_and_connect_node(parent, node);
        return 0;
 }
 
@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
 void wg_allowedips_remove_by_peer(struct allowedips *table,
                                  struct wg_peer *peer, struct mutex *lock)
 {
+       struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
+       bool free_parent;
+
+       if (list_empty(&peer->allowedips_list))
+               return;
        ++table->seq;
-       walk_remove_by_peer(&table->root4, peer, lock);
-       walk_remove_by_peer(&table->root6, peer, lock);
+       list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
+               list_del_init(&node->peer_list);
+               RCU_INIT_POINTER(node->peer, NULL);
+               if (node->bit[0] && node->bit[1])
+                       continue;
+               child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
+                                                 lockdep_is_held(lock));
+               if (child)
+                       child->parent_bit_packed = node->parent_bit_packed;
+               parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
+               *parent_bit = child;
+               parent = (void *)parent_bit -
+                        offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
+               free_parent = !rcu_access_pointer(node->bit[0]) &&
+                             !rcu_access_pointer(node->bit[1]) &&
+                             (node->parent_bit_packed & 3) <= 1 &&
+                             !rcu_access_pointer(parent->peer);
+               if (free_parent)
+                       child = rcu_dereference_protected(
+                                       parent->bit[!(node->parent_bit_packed & 1)],
+                                       lockdep_is_held(lock));
+               call_rcu(&node->rcu, node_free_rcu);
+               if (!free_parent)
+                       continue;
+               if (child)
+                       child->parent_bit_packed = parent->parent_bit_packed;
+               *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
+               call_rcu(&parent->rcu, node_free_rcu);
+       }
 }
 
 int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
        return NULL;
 }
 
+int __init wg_allowedips_slab_init(void)
+{
+       node_cache = KMEM_CACHE(allowedips_node, 0);
+       return node_cache ? 0 : -ENOMEM;
+}
+
+void wg_allowedips_slab_uninit(void)
+{
+       rcu_barrier();
+       kmem_cache_destroy(node_cache);
+}
+
 #include "selftest/allowedips.c"
index e5c83ca..2346c79 100644 (file)
@@ -15,14 +15,11 @@ struct wg_peer;
 struct allowedips_node {
        struct wg_peer __rcu *peer;
        struct allowedips_node __rcu *bit[2];
-       /* While it may seem scandalous that we waste space for v4,
-        * we're alloc'ing to the nearest power of 2 anyway, so this
-        * doesn't actually make a difference.
-        */
-       u8 bits[16] __aligned(__alignof(u64));
        u8 cidr, bit_at_a, bit_at_b, bitlen;
+       u8 bits[16] __aligned(__alignof(u64));
 
-       /* Keep rarely used list at bottom to be beyond cache line. */
+       /* Keep rarely used members at bottom to be beyond cache line. */
+       unsigned long parent_bit_packed;
        union {
                struct list_head peer_list;
                struct rcu_head rcu;
@@ -33,7 +30,7 @@ struct allowedips {
        struct allowedips_node __rcu *root4;
        struct allowedips_node __rcu *root6;
        u64 seq;
-};
+} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */
 
 void wg_allowedips_init(struct allowedips *table);
 void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
 bool wg_allowedips_selftest(void);
 #endif
 
+int wg_allowedips_slab_init(void);
+void wg_allowedips_slab_uninit(void);
+
 #endif /* _WG_ALLOWEDIPS_H */
index 7a7d5f1..75dbe77 100644 (file)
@@ -21,13 +21,22 @@ static int __init mod_init(void)
 {
        int ret;
 
+       ret = wg_allowedips_slab_init();
+       if (ret < 0)
+               goto err_allowedips;
+
 #ifdef DEBUG
+       ret = -ENOTRECOVERABLE;
        if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
            !wg_ratelimiter_selftest())
-               return -ENOTRECOVERABLE;
+               goto err_peer;
 #endif
        wg_noise_init();
 
+       ret = wg_peer_init();
+       if (ret < 0)
+               goto err_peer;
+
        ret = wg_device_init();
        if (ret < 0)
                goto err_device;
@@ -44,6 +53,10 @@ static int __init mod_init(void)
 err_netlink:
        wg_device_uninit();
 err_device:
+       wg_peer_uninit();
+err_peer:
+       wg_allowedips_slab_uninit();
+err_allowedips:
        return ret;
 }
 
@@ -51,6 +64,8 @@ static void __exit mod_exit(void)
 {
        wg_genetlink_uninit();
        wg_device_uninit();
+       wg_peer_uninit();
+       wg_allowedips_slab_uninit();
 }
 
 module_init(mod_init);
index cd5cb02..1acd00a 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 
+static struct kmem_cache *peer_cache;
 static atomic64_t peer_counter = ATOMIC64_INIT(0);
 
 struct wg_peer *wg_peer_create(struct wg_device *wg,
@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
        if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
                return ERR_PTR(ret);
 
-       peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+       peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
        if (unlikely(!peer))
                return ERR_PTR(ret);
-       if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+       if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
                goto err;
 
        peer->device = wg;
@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
        return peer;
 
 err:
-       kfree(peer);
+       kmem_cache_free(peer_cache, peer);
        return ERR_PTR(ret);
 }
 
@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer)
        /* Mark as dead, so that we don't allow jumping contexts after. */
        WRITE_ONCE(peer->is_dead, true);
 
-       /* The caller must now synchronize_rcu() for this to take effect. */
+       /* The caller must now synchronize_net() for this to take effect. */
 }
 
 static void peer_remove_after_dead(struct wg_peer *peer)
@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer)
        lockdep_assert_held(&peer->device->device_update_lock);
 
        peer_make_dead(peer);
-       synchronize_rcu();
+       synchronize_net();
        peer_remove_after_dead(peer);
 }
 
@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg)
                peer_make_dead(peer);
                list_add_tail(&peer->peer_list, &dead_peers);
        }
-       synchronize_rcu();
+       synchronize_net();
        list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
                peer_remove_after_dead(peer);
 }
@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu)
        /* The final zeroing takes care of clearing any remaining handshake key
         * material and other potentially sensitive information.
         */
-       kfree_sensitive(peer);
+       memzero_explicit(peer, sizeof(*peer));
+       kmem_cache_free(peer_cache, peer);
 }
 
 static void kref_release(struct kref *refcount)
@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
                return;
        kref_put(&peer->refcount, kref_release);
 }
+
+int __init wg_peer_init(void)
+{
+       peer_cache = KMEM_CACHE(wg_peer, 0);
+       return peer_cache ? 0 : -ENOMEM;
+}
+
+void wg_peer_uninit(void)
+{
+       kmem_cache_destroy(peer_cache);
+}
index 8d53b68..76e4d31 100644 (file)
@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
 void wg_peer_remove(struct wg_peer *peer);
 void wg_peer_remove_all(struct wg_device *wg);
 
+int wg_peer_init(void);
+void wg_peer_uninit(void);
+
 #endif /* _WG_PEER_H */
index 846db14..e173204 100644 (file)
 
 #include <linux/siphash.h>
 
-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
-                                             u8 cidr)
-{
-       swap_endian(dst, src, bits);
-       memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
-       if (cidr)
-               dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
-}
-
 static __init void print_node(struct allowedips_node *node, u8 bits)
 {
        char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
-       char *fmt_declaration = KERN_DEBUG
-               "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+       char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+       u8 ip1[16], ip2[16], cidr1, cidr2;
        char *style = "dotted";
-       u8 ip1[16], ip2[16];
        u32 color = 0;
 
+       if (node == NULL)
+               return;
        if (bits == 32) {
                fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
-               fmt_declaration = KERN_DEBUG
-                       "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+               fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
        } else if (bits == 128) {
                fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
-               fmt_declaration = KERN_DEBUG
-                       "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+               fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
        }
        if (node->peer) {
                hsiphash_key_t key = { { 0 } };
@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits)
                        hsiphash_1u32(0xabad1dea, &key) % 200;
                style = "bold";
        }
-       swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
-       printk(fmt_declaration, ip1, node->cidr, style, color);
+       wg_allowedips_read_node(node, ip1, &cidr1);
+       printk(fmt_declaration, ip1, cidr1, style, color);
        if (node->bit[0]) {
-               swap_endian_and_apply_cidr(ip2,
-                               rcu_dereference_raw(node->bit[0])->bits, bits,
-                               node->cidr);
-               printk(fmt_connection, ip1, node->cidr, ip2,
-                      rcu_dereference_raw(node->bit[0])->cidr);
-               print_node(rcu_dereference_raw(node->bit[0]), bits);
+               wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
+               printk(fmt_connection, ip1, cidr1, ip2, cidr2);
        }
        if (node->bit[1]) {
-               swap_endian_and_apply_cidr(ip2,
-                               rcu_dereference_raw(node->bit[1])->bits,
-                               bits, node->cidr);
-               printk(fmt_connection, ip1, node->cidr, ip2,
-                      rcu_dereference_raw(node->bit[1])->cidr);
-               print_node(rcu_dereference_raw(node->bit[1]), bits);
+               wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
+               printk(fmt_connection, ip1, cidr1, ip2, cidr2);
        }
+       if (node->bit[0])
+               print_node(rcu_dereference_raw(node->bit[0]), bits);
+       if (node->bit[1])
+               print_node(rcu_dereference_raw(node->bit[1]), bits);
 }
 
 static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
 {
        union nf_inet_addr mask;
 
-       memset(&mask, 0x00, 128 / 8);
-       memset(&mask, 0xff, cidr / 8);
+       memset(&mask, 0, sizeof(mask));
+       memset(&mask.all, 0xff, cidr / 8);
        if (cidr % 32)
                mask.all[cidr / 32] = (__force u32)htonl(
                        (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node)
 }
 
 static __init inline bool
-horrible_match_v4(const struct horrible_allowedips_node *node,
-                 struct in_addr *ip)
+horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
 {
        return (ip->s_addr & node->mask.ip) == node->ip.ip;
 }
 
 static __init inline bool
-horrible_match_v6(const struct horrible_allowedips_node *node,
-                 struct in6_addr *ip)
+horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
 {
-       return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
-                      node->ip.ip6[0] &&
-              (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
-                      node->ip.ip6[1] &&
-              (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
-                      node->ip.ip6[2] &&
+       return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
+              (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
+              (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
               (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
 }
 
 static __init void
-horrible_insert_ordered(struct horrible_allowedips *table,
-                       struct horrible_allowedips_node *node)
+horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
 {
        struct horrible_allowedips_node *other = NULL, *where = NULL;
        u8 my_cidr = horrible_mask_to_cidr(node->mask);
 
        hlist_for_each_entry(other, &table->head, table) {
-               if (!memcmp(&other->mask, &node->mask,
-                           sizeof(union nf_inet_addr)) &&
-                   !memcmp(&other->ip, &node->ip,
-                           sizeof(union nf_inet_addr)) &&
-                   other->ip_version == node->ip_version) {
+               if (other->ip_version == node->ip_version &&
+                   !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
+                   !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
                        other->value = node->value;
                        kfree(node);
                        return;
                }
+       }
+       hlist_for_each_entry(other, &table->head, table) {
                where = other;
                if (horrible_mask_to_cidr(other->mask) <= my_cidr)
                        break;
@@ -201,8 +181,7 @@ static __init int
 horrible_allowedips_insert_v4(struct horrible_allowedips *table,
                              struct in_addr *ip, u8 cidr, void *value)
 {
-       struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
-                                                       GFP_KERNEL);
+       struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 
        if (unlikely(!node))
                return -ENOMEM;
@@ -219,8 +198,7 @@ static __init int
 horrible_allowedips_insert_v6(struct horrible_allowedips *table,
                              struct in6_addr *ip, u8 cidr, void *value)
 {
-       struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
-                                                       GFP_KERNEL);
+       struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 
        if (unlikely(!node))
                return -ENOMEM;
@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
 }
 
 static __init void *
-horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
-                             struct in_addr *ip)
+horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
 {
        struct horrible_allowedips_node *node;
-       void *ret = NULL;
 
        hlist_for_each_entry(node, &table->head, table) {
-               if (node->ip_version != 4)
-                       continue;
-               if (horrible_match_v4(node, ip)) {
-                       ret = node->value;
-                       break;
-               }
+               if (node->ip_version == 4 && horrible_match_v4(node, ip))
+                       return node->value;
        }
-       return ret;
+       return NULL;
 }
 
 static __init void *
-horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
-                             struct in6_addr *ip)
+horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
 {
        struct horrible_allowedips_node *node;
-       void *ret = NULL;
 
        hlist_for_each_entry(node, &table->head, table) {
-               if (node->ip_version != 6)
+               if (node->ip_version == 6 && horrible_match_v6(node, ip))
+                       return node->value;
+       }
+       return NULL;
+}
+
+
+static __init void
+horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
+{
+       struct horrible_allowedips_node *node;
+       struct hlist_node *h;
+
+       hlist_for_each_entry_safe(node, h, &table->head, table) {
+               if (node->value != value)
                        continue;
-               if (horrible_match_v6(node, ip)) {
-                       ret = node->value;
-                       break;
-               }
+               hlist_del(&node->table);
+               kfree(node);
        }
-       return ret;
+
 }
 
 static __init bool randomized_test(void)
@@ -296,6 +278,7 @@ static __init bool randomized_test(void)
                        goto free;
                }
                kref_init(&peers[i]->refcount);
+               INIT_LIST_HEAD(&peers[i]->allowedips_list);
        }
 
        mutex_lock(&mutex);
@@ -333,7 +316,7 @@ static __init bool randomized_test(void)
                        if (wg_allowedips_insert_v4(&t,
                                                    (struct in_addr *)mutated,
                                                    cidr, peer, &mutex) < 0) {
-                               pr_err("allowedips random malloc: FAIL\n");
+                               pr_err("allowedips random self-test malloc: FAIL\n");
                                goto free_locked;
                        }
                        if (horrible_allowedips_insert_v4(&h,
@@ -396,23 +379,33 @@ static __init bool randomized_test(void)
                print_tree(t.root6, 128);
        }
 
-       for (i = 0; i < NUM_QUERIES; ++i) {
-               prandom_bytes(ip, 4);
-               if (lookup(t.root4, 32, ip) !=
-                   horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
-                       pr_err("allowedips random self-test: FAIL\n");
-                       goto free;
+       for (j = 0;; ++j) {
+               for (i = 0; i < NUM_QUERIES; ++i) {
+                       prandom_bytes(ip, 4);
+                       if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+                               horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
+                               pr_err("allowedips random v4 self-test: FAIL\n");
+                               goto free;
+                       }
+                       prandom_bytes(ip, 16);
+                       if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+                               pr_err("allowedips random v6 self-test: FAIL\n");
+                               goto free;
+                       }
                }
+               if (j >= NUM_PEERS)
+                       break;
+               mutex_lock(&mutex);
+               wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
+               mutex_unlock(&mutex);
+               horrible_allowedips_remove_by_value(&h, peers[j]);
        }
 
-       for (i = 0; i < NUM_QUERIES; ++i) {
-               prandom_bytes(ip, 16);
-               if (lookup(t.root6, 128, ip) !=
-                   horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
-                       pr_err("allowedips random self-test: FAIL\n");
-                       goto free;
-               }
+       if (t.root4 || t.root6) {
+               pr_err("allowedips random self-test removal: FAIL\n");
+               goto free;
        }
+
        ret = true;
 
 free:
index d9ad850..8c496b7 100644 (file)
@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
        if (new4)
                wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
        mutex_unlock(&wg->socket_update_lock);
-       synchronize_rcu();
+       synchronize_net();
        sock_free(old4);
        sock_free(old6);
 }
index 51ce767..7a6fd46 100644 (file)
@@ -1693,8 +1693,13 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
 static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
 {
        struct mac80211_hwsim_data *data = hw->priv;
+
        data->started = false;
        hrtimer_cancel(&data->beacon_timer);
+
+       while (!skb_queue_empty(&data->pending))
+               ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
+
        wiphy_dbg(hw->wiphy, "%s\n", __func__);
 }
 
index 977acab..03fe628 100644 (file)
@@ -514,10 +514,36 @@ EXPORT_SYMBOL_GPL(mt76_free_device);
 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
 {
        struct sk_buff *skb = phy->rx_amsdu[q].head;
+       struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
        struct mt76_dev *dev = phy->dev;
 
        phy->rx_amsdu[q].head = NULL;
        phy->rx_amsdu[q].tail = NULL;
+
+       /*
+        * Validate if the amsdu has a proper first subframe.
+        * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
+        * flag of the QoS header gets flipped. In such cases, the first
+        * subframe has a LLC/SNAP header in the location of the destination
+        * address.
+        */
+       if (skb_shinfo(skb)->frag_list) {
+               int offset = 0;
+
+               if (!(status->flag & RX_FLAG_8023)) {
+                       offset = ieee80211_get_hdrlen_from_skb(skb);
+
+                       if ((status->flag &
+                            (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
+                           RX_FLAG_DECRYPTED)
+                               offset += 8;
+               }
+
+               if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
+                       dev_kfree_skb(skb);
+                       return;
+               }
+       }
        __skb_queue_tail(&dev->rx_skb[q], skb);
 }
 
index 86341d1..d20f05a 100644 (file)
@@ -510,7 +510,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
        mutex_init(&dev->pm.mutex);
        init_waitqueue_head(&dev->pm.wait);
        spin_lock_init(&dev->pm.txq_lock);
-       set_bit(MT76_STATE_PM, &dev->mphy.state);
        INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work);
        INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
        INIT_DELAYED_WORK(&dev->coredump.work, mt7615_coredump_work);
index f81a17d..e2dcfee 100644 (file)
@@ -1912,8 +1912,9 @@ void mt7615_pm_wake_work(struct work_struct *work)
                        napi_schedule(&dev->mt76.napi[i]);
                mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
                mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
-               ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
-                                            MT7615_WATCHDOG_TIME);
+               if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+                       ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+                                                    MT7615_WATCHDOG_TIME);
        }
 
        ieee80211_wake_queues(mphy->hw);
index 17fe418..d1be78b 100644 (file)
@@ -51,16 +51,13 @@ mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
        return ret;
 }
 
-static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
 {
        struct sdio_func *func = dev->mt76.sdio.func;
        struct mt76_phy *mphy = &dev->mt76.phy;
        u32 status;
        int ret;
 
-       if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
-               goto out;
-
        sdio_claim_host(func);
 
        sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
@@ -76,13 +73,21 @@ static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
        }
 
        sdio_release_host(func);
-
-out:
        dev->pm.last_activity = jiffies;
 
        return 0;
 }
 
+static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+{
+       struct mt76_phy *mphy = &dev->mt76.phy;
+
+       if (test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+               return __mt7663s_mcu_drv_pmctrl(dev);
+
+       return 0;
+}
+
 static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
 {
        struct sdio_func *func = dev->mt76.sdio.func;
@@ -123,7 +128,7 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
        struct mt7615_mcu_ops *mcu_ops;
        int ret;
 
-       ret = mt7663s_mcu_drv_pmctrl(dev);
+       ret = __mt7663s_mcu_drv_pmctrl(dev);
        if (ret)
                return ret;
 
index c55698f..028ff43 100644 (file)
@@ -55,10 +55,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
 
        dev->mt76.mcu_ops = &mt7663u_mcu_ops,
 
-       /* usb does not support runtime-pm */
-       clear_bit(MT76_STATE_PM, &dev->mphy.state);
        mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
-
        if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
                mt7615_mcu_restart(&dev->mt76);
                if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
index fe0ab5e..6195616 100644 (file)
@@ -721,6 +721,10 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
        phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
        phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
        phy->rcpi = rcpi;
+       phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
+                               sta->ht_cap.ampdu_factor) |
+                    FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
+                               sta->ht_cap.ampdu_density);
 
        tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
        ra_info = (struct sta_rec_ra_info *)tlv;
index 5847f94..b795e72 100644 (file)
@@ -87,7 +87,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
        .reconfig_complete = mt76x02_reconfig_complete,
 };
 
-static int mt76x0e_register_device(struct mt76x02_dev *dev)
+static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume)
 {
        int err;
 
@@ -100,9 +100,11 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
        if (err < 0)
                return err;
 
-       err = mt76x02_dma_init(dev);
-       if (err < 0)
-               return err;
+       if (!resume) {
+               err = mt76x02_dma_init(dev);
+               if (err < 0)
+                       return err;
+       }
 
        err = mt76x0_init_hardware(dev);
        if (err < 0)
@@ -123,6 +125,17 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
        mt76_clear(dev, 0x110, BIT(9));
        mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
 
+       return 0;
+}
+
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
+{
+       int err;
+
+       err = mt76x0e_init_hardware(dev, false);
+       if (err < 0)
+               return err;
+
        err = mt76x0_register_device(dev);
        if (err < 0)
                return err;
@@ -167,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (ret)
                return ret;
 
+       mt76_pci_disable_aspm(pdev);
+
        mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
                                 &drv_ops);
        if (!mdev)
@@ -220,6 +235,60 @@ mt76x0e_remove(struct pci_dev *pdev)
        mt76_free_device(mdev);
 }
 
+#ifdef CONFIG_PM
+static int mt76x0e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       int i;
+
+       mt76_worker_disable(&mdev->tx_worker);
+       for (i = 0; i < ARRAY_SIZE(mdev->phy.q_tx); i++)
+               mt76_queue_tx_cleanup(dev, mdev->phy.q_tx[i], true);
+       for (i = 0; i < ARRAY_SIZE(mdev->q_mcu); i++)
+               mt76_queue_tx_cleanup(dev, mdev->q_mcu[i], true);
+       napi_disable(&mdev->tx_napi);
+
+       mt76_for_each_q_rx(mdev, i)
+               napi_disable(&mdev->napi[i]);
+
+       mt76x02_dma_disable(dev);
+       mt76x02_mcu_cleanup(dev);
+       mt76x0_chip_onoff(dev, false, false);
+
+       pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+       pci_save_state(pdev);
+
+       return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+}
+
+static int mt76x0e_resume(struct pci_dev *pdev)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       int err, i;
+
+       err = pci_set_power_state(pdev, PCI_D0);
+       if (err)
+               return err;
+
+       pci_restore_state(pdev);
+
+       mt76_worker_enable(&mdev->tx_worker);
+
+       mt76_for_each_q_rx(mdev, i) {
+               mt76_queue_rx_reset(dev, i);
+               napi_enable(&mdev->napi[i]);
+               napi_schedule(&mdev->napi[i]);
+       }
+
+       napi_enable(&mdev->tx_napi);
+       napi_schedule(&mdev->tx_napi);
+
+       return mt76x0e_init_hardware(dev, true);
+}
+#endif /* CONFIG_PM */
+
 static const struct pci_device_id mt76x0e_device_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7610) },
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7630) },
@@ -237,6 +306,10 @@ static struct pci_driver mt76x0e_driver = {
        .id_table       = mt76x0e_device_table,
        .probe          = mt76x0e_probe,
        .remove         = mt76x0e_remove,
+#ifdef CONFIG_PM
+       .suspend        = mt76x0e_suspend,
+       .resume         = mt76x0e_resume,
+#endif /* CONFIG_PM */
 };
 
 module_pci_driver(mt76x0e_driver);
index fe28bf4..1763ea0 100644 (file)
@@ -76,8 +76,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
        struct wiphy *wiphy = hw->wiphy;
 
        hw->queues = 4;
-       hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
-       hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+       hw->max_rx_aggregation_subframes = 64;
+       hw->max_tx_aggregation_subframes = 128;
 
        hw->radiotap_timestamp.units_pos =
                IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
index 214bd18..decf2d5 100644 (file)
@@ -1404,8 +1404,9 @@ void mt7921_pm_wake_work(struct work_struct *work)
                        napi_schedule(&dev->mt76.napi[i]);
                mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
                mt7921_tx_cleanup(dev);
-               ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
-                                            MT7921_WATCHDOG_TIME);
+               if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+                       ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+                                                    MT7921_WATCHDOG_TIME);
        }
 
        ieee80211_wake_queues(mphy->hw);
index f4c27aa..97a0ef3 100644 (file)
@@ -74,8 +74,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
                                IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
                else if (band == NL80211_BAND_5GHZ)
                        he_cap_elem->phy_cap_info[0] =
-                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
-                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
 
                he_cap_elem->phy_cap_info[1] =
                        IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
index 5f3d56d..67dc4b4 100644 (file)
@@ -402,20 +402,22 @@ static void
 mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
                          u16 wlan_idx)
 {
-       struct mt7921_mcu_wlan_info_event *wtbl_info =
-               (struct mt7921_mcu_wlan_info_event *)(skb->data);
-       struct rate_info rate = {};
-       u8 curr_idx = wtbl_info->rate_info.rate_idx;
-       u16 curr = le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]);
-       struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap;
+       struct mt7921_mcu_wlan_info_event *wtbl_info;
        struct mt76_phy *mphy = &dev->mphy;
        struct mt7921_sta_stats *stats;
+       struct rate_info rate = {};
        struct mt7921_sta *msta;
        struct mt76_wcid *wcid;
+       u8 idx;
 
        if (wlan_idx >= MT76_N_WCIDS)
                return;
 
+       wtbl_info = (struct mt7921_mcu_wlan_info_event *)skb->data;
+       idx = wtbl_info->rate_info.rate_idx;
+       if (idx >= ARRAY_SIZE(wtbl_info->rate_info.rate))
+               return;
+
        rcu_read_lock();
 
        wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
@@ -426,7 +428,8 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
        stats = &msta->stats;
 
        /* current rate */
-       mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr);
+       mt7921_mcu_tx_rate_parse(mphy, &wtbl_info->peer_cap, &rate,
+                                le16_to_cpu(wtbl_info->rate_info.rate[idx]));
        stats->tx_rate = rate;
 out:
        rcu_read_unlock();
index 193b723..c58996c 100644 (file)
@@ -684,6 +684,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
 {
        if (queue->task) {
                kthread_stop(queue->task);
+               put_task_struct(queue->task);
                queue->task = NULL;
        }
 
@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue,
        if (IS_ERR(task))
                goto kthread_err;
        queue->task = task;
+       /*
+        * Take a reference to the task in order to prevent it from being freed
+        * if the thread function returns before kthread_stop is called.
+        */
+       get_task_struct(task);
 
        task = kthread_run(xenvif_dealloc_kthread, queue,
                           "%s-dealloc", queue->name);
index 37943dc..4697a94 100644 (file)
@@ -1320,16 +1320,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
                int count)
 {
        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
-       struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
        struct ib_sge *sge = &req->sge[1];
+       struct scatterlist *sgl;
        u32 len = 0;
        int i;
 
-       for (i = 0; i < count; i++, sgl++, sge++) {
+       for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
                sge->addr = sg_dma_address(sgl);
                sge->length = sg_dma_len(sgl);
                sge->lkey = queue->device->pd->local_dma_lkey;
                len += sge->length;
+               sge++;
        }
 
        sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
index 4b29a5b..b20b8d0 100644 (file)
@@ -1005,19 +1005,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
        return req->transfer_len - req->metadata_len;
 }
 
-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+               struct nvmet_req *req)
 {
-       req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+       req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
                        nvmet_data_transfer_len(req));
        if (!req->sg)
                goto out_err;
 
        if (req->metadata_len) {
-               req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+               req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
                                &req->metadata_sg_cnt, req->metadata_len);
                if (!req->metadata_sg)
                        goto out_free_sg;
        }
+
+       req->p2p_dev = p2p_dev;
+
        return 0;
 out_free_sg:
        pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
@@ -1025,25 +1029,19 @@ out_err:
        return -ENOMEM;
 }
 
-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
 {
-       if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
-               return false;
-
-       if (req->sq->ctrl && req->sq->qid && req->ns) {
-               req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
-                                                req->ns->nsid);
-               if (req->p2p_dev)
-                       return true;
-       }
-
-       req->p2p_dev = NULL;
-       return false;
+       if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+           !req->sq->ctrl || !req->sq->qid || !req->ns)
+               return NULL;
+       return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
 }
 
 int nvmet_req_alloc_sgls(struct nvmet_req *req)
 {
-       if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+       struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+       if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
                return 0;
 
        req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
@@ -1072,6 +1070,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
                pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
                if (req->metadata_sg)
                        pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+               req->p2p_dev = NULL;
        } else {
                sgl_free(req->sg);
                if (req->metadata_sg)
index cb30cb9..a5c4a18 100644 (file)
@@ -263,7 +263,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
-       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+       if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+               return;
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -299,6 +300,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
                clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
        }
+       ctrl->ctrl.queue_count = 1;
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -405,6 +407,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        return 0;
 
 out_cleanup_queue:
+       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -462,8 +465,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
        nvme_loop_shutdown_ctrl(ctrl);
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
-               /* state change failure should never happen */
-               WARN_ON_ONCE(1);
+               if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+                   ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+                       /* state change failure for non-deleted ctrl? */
+                       WARN_ON_ONCE(1);
                return;
        }
 
index eca805c..9e6ce0d 100644 (file)
@@ -18,6 +18,7 @@ obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
 obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
 obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
 obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
 obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
 obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
 
@@ -38,6 +39,6 @@ ifdef CONFIG_ACPI
 ifdef CONFIG_PCI_QUIRKS
 obj-$(CONFIG_ARM64) += pcie-al.o
 obj-$(CONFIG_ARM64) += pcie-hisi.o
-obj-$(CONFIG_ARM64) += pcie-tegra194.o
+obj-$(CONFIG_ARM64) += pcie-tegra194-acpi.o
 endif
 endif
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
new file mode 100644 (file)
index 0000000..c2de6ed
--- /dev/null
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ACPI quirks for Tegra194 PCIe host controller
+ *
+ * Copyright (C) 2021 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include "pcie-designware.h"
+
+struct tegra194_pcie_ecam  {
+       void __iomem *config_base;
+       void __iomem *iatu_base;
+       void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+       struct device *dev = cfg->parent;
+       struct tegra194_pcie_ecam *pcie_ecam;
+
+       pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+       if (!pcie_ecam)
+               return -ENOMEM;
+
+       pcie_ecam->config_base = cfg->win;
+       pcie_ecam->iatu_base = cfg->win + SZ_256K;
+       pcie_ecam->dbi_base = cfg->win + SZ_512K;
+       cfg->priv = pcie_ecam;
+
+       return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+                         u32 val, u32 reg)
+{
+       u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+       writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+                                int index, int type, u64 cpu_addr,
+                                u64 pci_addr, u64 size)
+{
+       atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+                     PCIE_ATU_LOWER_BASE);
+       atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+                     PCIE_ATU_UPPER_BASE);
+       atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+                     PCIE_ATU_LOWER_TARGET);
+       atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+                     PCIE_ATU_LIMIT);
+       atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+                     PCIE_ATU_UPPER_TARGET);
+       atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
+       atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+                                     unsigned int devfn, int where)
+{
+       struct pci_config_window *cfg = bus->sysdata;
+       struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+       u32 busdev;
+       int type;
+
+       if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+               return NULL;
+
+       if (bus->number == cfg->busr.start) {
+               if (PCI_SLOT(devfn) == 0)
+                       return pcie_ecam->dbi_base + where;
+               else
+                       return NULL;
+       }
+
+       busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+                PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+       if (bus->parent->number == cfg->busr.start) {
+               if (PCI_SLOT(devfn) == 0)
+                       type = PCIE_ATU_TYPE_CFG0;
+               else
+                       return NULL;
+       } else {
+               type = PCIE_ATU_TYPE_CFG1;
+       }
+
+       program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+                            SZ_256K);
+
+       return pcie_ecam->config_base + where;
+}
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+       .init           = tegra194_acpi_init,
+       .pci_ops        = {
+               .map_bus        = tegra194_map_bus,
+               .read           = pci_generic_config_read,
+               .write          = pci_generic_config_write,
+       }
+};
index bafd2c6..504669e 100644 (file)
@@ -22,8 +22,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
-#include <linux/pci-acpi.h>
-#include <linux/pci-ecam.h>
 #include <linux/phy/phy.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
@@ -247,24 +245,6 @@ static const unsigned int pcie_gen_freq[] = {
        GEN4_CORE_CLK_FREQ
 };
 
-static const u32 event_cntr_ctrl_offset[] = {
-       0x1d8,
-       0x1a8,
-       0x1a8,
-       0x1a8,
-       0x1c4,
-       0x1d8
-};
-
-static const u32 event_cntr_data_offset[] = {
-       0x1dc,
-       0x1ac,
-       0x1ac,
-       0x1ac,
-       0x1c8,
-       0x1dc
-};
-
 struct tegra_pcie_dw {
        struct device *dev;
        struct resource *appl_res;
@@ -313,104 +293,6 @@ struct tegra_pcie_dw_of_data {
        enum dw_pcie_device_mode mode;
 };
 
-#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-struct tegra194_pcie_ecam  {
-       void __iomem *config_base;
-       void __iomem *iatu_base;
-       void __iomem *dbi_base;
-};
-
-static int tegra194_acpi_init(struct pci_config_window *cfg)
-{
-       struct device *dev = cfg->parent;
-       struct tegra194_pcie_ecam *pcie_ecam;
-
-       pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
-       if (!pcie_ecam)
-               return -ENOMEM;
-
-       pcie_ecam->config_base = cfg->win;
-       pcie_ecam->iatu_base = cfg->win + SZ_256K;
-       pcie_ecam->dbi_base = cfg->win + SZ_512K;
-       cfg->priv = pcie_ecam;
-
-       return 0;
-}
-
-static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
-                         u32 val, u32 reg)
-{
-       u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
-       writel(val, pcie_ecam->iatu_base + offset + reg);
-}
-
-static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
-                                int index, int type, u64 cpu_addr,
-                                u64 pci_addr, u64 size)
-{
-       atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
-                     PCIE_ATU_LOWER_BASE);
-       atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
-                     PCIE_ATU_UPPER_BASE);
-       atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
-                     PCIE_ATU_LOWER_TARGET);
-       atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
-                     PCIE_ATU_LIMIT);
-       atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
-                     PCIE_ATU_UPPER_TARGET);
-       atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
-       atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
-}
-
-static void __iomem *tegra194_map_bus(struct pci_bus *bus,
-                                     unsigned int devfn, int where)
-{
-       struct pci_config_window *cfg = bus->sysdata;
-       struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
-       u32 busdev;
-       int type;
-
-       if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
-               return NULL;
-
-       if (bus->number == cfg->busr.start) {
-               if (PCI_SLOT(devfn) == 0)
-                       return pcie_ecam->dbi_base + where;
-               else
-                       return NULL;
-       }
-
-       busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
-                PCIE_ATU_FUNC(PCI_FUNC(devfn));
-
-       if (bus->parent->number == cfg->busr.start) {
-               if (PCI_SLOT(devfn) == 0)
-                       type = PCIE_ATU_TYPE_CFG0;
-               else
-                       return NULL;
-       } else {
-               type = PCIE_ATU_TYPE_CFG1;
-       }
-
-       program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
-                            SZ_256K);
-
-       return pcie_ecam->config_base + where;
-}
-
-const struct pci_ecam_ops tegra194_pcie_ops = {
-       .init           = tegra194_acpi_init,
-       .pci_ops        = {
-               .map_bus        = tegra194_map_bus,
-               .read           = pci_generic_config_read,
-               .write          = pci_generic_config_write,
-       }
-};
-#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
-
-#ifdef CONFIG_PCIE_TEGRA194
-
 static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
 {
        return container_of(pci, struct tegra_pcie_dw, pci);
@@ -694,6 +576,24 @@ static struct pci_ops tegra_pci_ops = {
 };
 
 #if defined(CONFIG_PCIEASPM)
+static const u32 event_cntr_ctrl_offset[] = {
+       0x1d8,
+       0x1a8,
+       0x1a8,
+       0x1a8,
+       0x1c4,
+       0x1d8
+};
+
+static const u32 event_cntr_data_offset[] = {
+       0x1dc,
+       0x1ac,
+       0x1ac,
+       0x1ac,
+       0x1c8,
+       0x1dc
+};
+
 static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
 {
        u32 val;
@@ -2411,5 +2311,3 @@ MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
 MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
 MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
 MODULE_LICENSE("GPL v2");
-
-#endif /* CONFIG_PCIE_TEGRA194 */
index 051b48b..e3f5e7a 100644 (file)
@@ -514,7 +514,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
                udelay(PIO_RETRY_DELAY);
        }
 
-       dev_err(dev, "config read/write timed out\n");
+       dev_err(dev, "PIO read/write transfer time out\n");
        return -ETIMEDOUT;
 }
 
@@ -657,6 +657,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
        return true;
 }
 
+static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
+{
+       struct device *dev = &pcie->pdev->dev;
+
+       /*
+        * Trying to start a new PIO transfer while the previous one has not
+        * completed causes an External Abort on the CPU and a kernel panic:
+        *
+        *     SError Interrupt on CPU0, code 0xbf000002 -- SError
+        *     Kernel panic - not syncing: Asynchronous SError Interrupt
+        *
+        * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
+        * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
+        * concurrent calls at the same time. But because PIO transfer may take
+        * about 1.5s when link is down or card is disconnected, it means that
+        * advk_pcie_wait_pio() does not always have to wait for completion.
+        *
+        * Some versions of ARM Trusted Firmware handle this External Abort at
+        * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit:
+        * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
+        */
+       if (advk_readl(pcie, PIO_START)) {
+               dev_err(dev, "Previous PIO read/write transfer is still running\n");
+               return true;
+       }
+
+       return false;
+}
+
 static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
                             int where, int size, u32 *val)
 {
@@ -673,9 +702,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
                return pci_bridge_emul_conf_read(&pcie->bridge, where,
                                                 size, val);
 
-       /* Start PIO */
-       advk_writel(pcie, 0, PIO_START);
-       advk_writel(pcie, 1, PIO_ISR);
+       if (advk_pcie_pio_is_running(pcie)) {
+               *val = 0xffffffff;
+               return PCIBIOS_SET_FAILED;
+       }
 
        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
@@ -694,7 +724,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
        /* Program the data strobe */
        advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
 
-       /* Start the transfer */
+       /* Clear PIO DONE ISR and start the transfer */
+       advk_writel(pcie, 1, PIO_ISR);
        advk_writel(pcie, 1, PIO_START);
 
        ret = advk_pcie_wait_pio(pcie);
@@ -734,9 +765,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
        if (where % size)
                return PCIBIOS_SET_FAILED;
 
-       /* Start PIO */
-       advk_writel(pcie, 0, PIO_START);
-       advk_writel(pcie, 1, PIO_ISR);
+       if (advk_pcie_pio_is_running(pcie))
+               return PCIBIOS_SET_FAILED;
 
        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
@@ -763,7 +793,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
        /* Program the data strobe */
        advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
 
-       /* Start the transfer */
+       /* Clear PIO DONE ISR and start the transfer */
+       advk_writel(pcie, 1, PIO_ISR);
        advk_writel(pcie, 1, PIO_START);
 
        ret = advk_pcie_wait_pio(pcie);
index da5b414..a143b02 100644 (file)
@@ -103,6 +103,13 @@ struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
 #endif
 }
 
+bool pci_host_of_has_msi_map(struct device *dev)
+{
+       if (dev && dev->of_node)
+               return of_get_property(dev->of_node, "msi-map", NULL);
+       return false;
+}
+
 static inline int __of_pci_pci_compare(struct device_node *node,
                                       unsigned int data)
 {
@@ -346,6 +353,8 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
                                dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
                                         dev_node);
                        *io_base = range.cpu_addr;
+               } else if (resource_type(res) == IORESOURCE_MEM) {
+                       res->flags &= ~IORESOURCE_MEM_64;
                }
 
                pci_add_resource_offset(resources, res, res->start - range.pci_addr);
index 3a62d09..2752046 100644 (file)
@@ -925,7 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
        device_enable_async_suspend(bus->bridge);
        pci_set_bus_of_node(bus);
        pci_set_bus_msi_domain(bus);
-       if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev))
+       if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) &&
+           !pci_host_of_has_msi_map(parent))
                bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
 
        if (!parent)
index dcb229d..22b2bb1 100644 (file)
@@ -3546,6 +3546,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
        dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
 }
 
+/*
+ * Some NVIDIA GPU devices do not work with bus reset; SBR needs to be
+ * prevented for those affected devices.
+ */
+static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
+{
+       if ((dev->device & 0xffc0) == 0x2340)
+               quirk_no_bus_reset(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+                        quirk_nvidia_no_bus_reset);
+
 /*
  * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
  * The device will throw a Link Down error on AER-capable systems and
@@ -3566,6 +3578,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
  */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
 
+/*
+ * Some TI KeyStone C667X devices do not support bus/hot reset.  The PCIESS
+ * automatically disables LTSSM when Secondary Bus Reset is received and
+ * the device stops working.  Prevent bus reset for these devices.  With
+ * this change, the device can be assigned to VMs with VFIO, but it will
+ * leak state between VMs.  Reference
+ * https://e2e.ti.com/support/processors/f/791/t/954382
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
+
 static void quirk_no_pm_reset(struct pci_dev *dev)
 {
        /*
@@ -3901,6 +3923,69 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
        return 0;
 }
 
+#define PCI_DEVICE_ID_HINIC_VF      0x375E
+#define HINIC_VF_FLR_TYPE           0x1000
+#define HINIC_VF_FLR_CAP_BIT        (1UL << 30)
+#define HINIC_VF_OP                 0xE80
+#define HINIC_VF_FLR_PROC_BIT       (1UL << 18)
+#define HINIC_OPERATION_TIMEOUT     15000      /* 15 seconds */
+
+/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
+static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+{
+       unsigned long timeout;
+       void __iomem *bar;
+       u32 val;
+
+       if (probe)
+               return 0;
+
+       bar = pci_iomap(pdev, 0, 0);
+       if (!bar)
+               return -ENOTTY;
+
+       /* Get and check firmware capabilities */
+       val = ioread32be(bar + HINIC_VF_FLR_TYPE);
+       if (!(val & HINIC_VF_FLR_CAP_BIT)) {
+               pci_iounmap(pdev, bar);
+               return -ENOTTY;
+       }
+
+       /* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
+       val = ioread32be(bar + HINIC_VF_OP);
+       val = val | HINIC_VF_FLR_PROC_BIT;
+       iowrite32be(val, bar + HINIC_VF_OP);
+
+       pcie_flr(pdev);
+
+       /*
+        * The device must recapture its Bus and Device Numbers after FLR
+        * in order to generate Completions.  Issue a config write to let the
+        * device capture this information.
+        */
+       pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
+
+       /* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
+       timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
+       do {
+               val = ioread32be(bar + HINIC_VF_OP);
+               if (!(val & HINIC_VF_FLR_PROC_BIT))
+                       goto reset_complete;
+               msleep(20);
+       } while (time_before(jiffies, timeout));
+
+       val = ioread32be(bar + HINIC_VF_OP);
+       if (!(val & HINIC_VF_FLR_PROC_BIT))
+               goto reset_complete;
+
+       pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
+
+reset_complete:
+       pci_iounmap(pdev, bar);
+
+       return 0;
+}
+
 static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
                 reset_intel_82599_sfp_virtfn },
@@ -3913,6 +3998,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
        { PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
        { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
                reset_chelsio_generic_dev },
+       { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
+               reset_hinic_vf_dev },
        { 0 }
 };
 
@@ -4753,6 +4840,8 @@ static const struct pci_dev_acs_enabled {
        { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
        { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
        { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
+       /* Broadcom multi-function device */
+       { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
        { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
        /* Amazon Annapurna Labs */
        { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
@@ -5154,7 +5243,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
 static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
 {
        if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
-           (pdev->device == 0x7340 && pdev->revision != 0xc5))
+           (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
+           (pdev->device == 0x7341 && pdev->revision != 0x00))
                return;
 
        if (pdev->device == 0x15d8) {
@@ -5181,6 +5271,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
 /* AMD Navi14 dGPU */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
 /* AMD Raven platform iGPU */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
 #endif /* CONFIG_PCI_ATS */
index 899b9eb..a39f30f 100644 (file)
@@ -78,7 +78,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
         * Other architectures (e.g., ARM) either do not support big endian, or
         * else leave I/O in little endian mode.
         */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(addr);
        else
                return readl_relaxed(addr);
@@ -87,7 +87,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
 static inline void brcm_usb_writel(u32 val, void __iomem *addr)
 {
        /* See brcmnand_readl() comments */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                __raw_writel(val, addr);
        else
                writel_relaxed(val, addr);
index 5c68e31..e93818e 100644 (file)
@@ -940,6 +940,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
        sp->nsubnodes = node;
 
        if (sp->num_lanes > SIERRA_MAX_LANES) {
+               ret = -EINVAL;
                dev_err(dev, "Invalid lane configuration\n");
                goto put_child2;
        }
index cdbcc49..731c483 100644 (file)
@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
                break;
        default:
                dev_err(tphy->dev, "incompatible PHY type\n");
+               clk_disable_unprepare(instance->ref_clk);
+               clk_disable_unprepare(instance->da_ref_clk);
                return -EINVAL;
        }
 
index c8a7d09..4076580 100644 (file)
@@ -2470,6 +2470,10 @@ static int sparx5_serdes_probe(struct platform_device *pdev)
        priv->coreclock = clock;
 
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!iores) {
+               dev_err(priv->dev, "Invalid resource\n");
+               return -EINVAL;
+       }
        iomem = devm_ioremap(priv->dev, iores->start, resource_size(iores));
        if (IS_ERR(iomem)) {
                dev_err(priv->dev, "Unable to get serdes registers: %s\n",
index 753cb5b..2a9465f 100644 (file)
@@ -341,7 +341,7 @@ static struct platform_driver mt7621_pci_phy_driver = {
        .probe = mt7621_pci_phy_probe,
        .driver = {
                .name = "mt7621-pci-phy",
-               .of_match_table = of_match_ptr(mt7621_pci_phy_ids),
+               .of_match_table = mt7621_pci_phy_ids,
        },
 };
 
index 9eb6d37..126f5b8 100644 (file)
@@ -1212,6 +1212,7 @@ static int wiz_probe(struct platform_device *pdev)
 
                if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
                    wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
+                       ret = -EINVAL;
                        dev_err(dev, "Invalid typec-dir-debounce property\n");
                        goto err_addr_to_resource;
                }
index 996ebcb..4c0d266 100644 (file)
@@ -2702,8 +2702,8 @@ static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
 }
 
 /**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g5_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
  *
  * @ctx: The pinmux context
  * @expr: The expression associated with the function whose signal is to be
index 5c1a109..eeab093 100644 (file)
@@ -2611,8 +2611,8 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
 };
 
 /**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g6_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
  *
  * @ctx: The pinmux context
  * @expr: The expression associated with the function whose signal is to be
index 9c65d56..9bbfe5c 100644 (file)
@@ -108,7 +108,8 @@ static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
 }
 
 /**
- * Disable a signal on a pin by disabling all provided signal expressions.
+ * aspeed_disable_sig() - Disable a signal on a pin by disabling all provided
+ * signal expressions.
  *
  * @ctx: The pinmux context
  * @exprs: The list of signal expressions (from a priority level on a pin)
index 57305ca..894e2ef 100644 (file)
@@ -21,7 +21,8 @@ static inline void aspeed_sig_desc_print_val(
 }
 
 /**
- * Query the enabled or disabled state of a signal descriptor
+ * aspeed_sig_desc_eval() - Query the enabled or disabled state of a signal
+ * descriptor.
  *
  * @desc: The signal descriptor of interest
  * @enabled: True to query the enabled state, false to query disabled state
index 25d2f7f..11e967d 100644 (file)
@@ -223,7 +223,7 @@ config PINCTRL_SC7280
 config PINCTRL_SC8180X
        tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
        depends on GPIOLIB && (OF || ACPI)
-       select PINCTRL_MSM
+       depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
          Qualcomm Technologies Inc TLMM block found on the Qualcomm
index 5aaf57b..0bb4931 100644 (file)
@@ -410,15 +410,15 @@ static const char * const gpio_groups[] = {
        "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
        "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
        "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
-       "gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54",
-       "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61",
-       "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
-       "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
-       "gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82",
-       "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
-       "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
-       "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
-       "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+       "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+       "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+       "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+       "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+       "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+       "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+       "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+       "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+       "gpio105", "gpio106", "gpio107",
 };
 
 static const char * const qdss_stm_groups[] = {
index 1f4bca8..a9b511c 100644 (file)
@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
        if (p->groups[group].enabled) {
                dev_err(p->dev, "%s is already enabled\n",
                        p->groups[group].name);
-               return -EBUSY;
+               return 0;
        }
 
        p->groups[group].enabled = 1;
index a9db2f3..b013445 100644 (file)
@@ -683,13 +683,13 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
 
        err = devm_request_irq(&pdev->dev, priv->irq,
                               mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
-                              | IRQF_SHARED | IRQF_NO_AUTOEN,
-                              "mlxreg-hotplug", priv);
+                              | IRQF_SHARED, "mlxreg-hotplug", priv);
        if (err) {
                dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
                return err;
        }
 
+       disable_irq(priv->irq);
        spin_lock_init(&priv->lock);
        INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
        dev_set_drvdata(&pdev->dev, priv);
index 8a70df6..a06964a 100644 (file)
@@ -1907,7 +1907,7 @@ static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
 {
        int status;
 
-       status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
+       status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
 
        if (status < 0 && status != -EINVAL) {
                ssam_err(ctrl,
index 685d37a..ef83461 100644 (file)
@@ -156,7 +156,7 @@ static const struct software_node *ssam_node_group_sl2[] = {
        NULL,
 };
 
-/* Devices for Surface Laptop 3. */
+/* Devices for Surface Laptop 3 and 4. */
 static const struct software_node *ssam_node_group_sl3[] = {
        &ssam_node_root,
        &ssam_node_bat_ac,
@@ -521,9 +521,12 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
        /* Surface Laptop 3 (13", Intel) */
        { "MSHW0114", (unsigned long)ssam_node_group_sl3 },
 
-       /* Surface Laptop 3 (15", AMD) */
+       /* Surface Laptop 3 (15", AMD) and 4 (15", AMD) */
        { "MSHW0110", (unsigned long)ssam_node_group_sl3 },
 
+       /* Surface Laptop 4 (13", Intel) */
+       { "MSHW0250", (unsigned long)ssam_node_group_sl3 },
+
        /* Surface Laptop Go 1 */
        { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
 
index 5d9b758..1203b9a 100644 (file)
@@ -427,6 +427,7 @@ static int surface_dtx_open(struct inode *inode, struct file *file)
         */
        if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
                up_write(&ddev->client_lock);
+               mutex_destroy(&client->read_lock);
                sdtx_device_put(client->ddev);
                kfree(client);
                return -ENODEV;
index dd60c93..edd71e7 100644 (file)
@@ -8853,6 +8853,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
        TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (2nd gen) */
        TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (3nd gen) */
        TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),  /* P15 (1st gen) / P15v (1st gen) */
+       TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL),  /* X1 Carbon (9th gen) */
 };
 
 static int __init fan_init(struct ibm_init_struct *iibm)
index 03a246e..21c4c34 100644 (file)
@@ -63,7 +63,7 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
        spin_unlock_irqrestore(&queue->lock, flags);
 }
 
-s32 scaled_ppm_to_ppb(long ppm)
+long scaled_ppm_to_ppb(long ppm)
 {
        /*
         * The 'freq' field in the 'struct timex' is in parts per
@@ -80,7 +80,7 @@ s32 scaled_ppm_to_ppb(long ppm)
        s64 ppb = 1 + ppm;
        ppb *= 125;
        ppb >>= 13;
-       return (s32) ppb;
+       return (long) ppb;
 }
 EXPORT_SYMBOL(scaled_ppm_to_ppb);
 
@@ -138,7 +138,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
                delta = ktime_to_ns(kt);
                err = ops->adjtime(ops, delta);
        } else if (tx->modes & ADJ_FREQUENCY) {
-               s32 ppb = scaled_ppm_to_ppb(tx->freq);
+               long ppb = scaled_ppm_to_ppb(tx->freq);
                if (ppb > ops->max_adj || ppb < -ops->max_adj)
                        return -ERANGE;
                if (ops->adjfine)
index 9d84d92..3e7a385 100644 (file)
@@ -1031,7 +1031,7 @@ config REGULATOR_RT5033
          current source, LDO and Buck.
 
 config REGULATOR_RTMV20
-       tristate "RTMV20 Laser Diode Regulator"
+       tristate "Richtek RTMV20 Laser Diode Regulator"
        depends on I2C
        select REGMAP_I2C
        help
index d8b4299..05147d2 100644 (file)
@@ -28,16 +28,16 @@ static const struct linear_range atc2609a_dcdc_voltage_ranges[] = {
 
 static const struct linear_range atc2609a_ldo_voltage_ranges0[] = {
        REGULATOR_LINEAR_RANGE(700000, 0, 15, 100000),
-       REGULATOR_LINEAR_RANGE(2100000, 16, 28, 100000),
+       REGULATOR_LINEAR_RANGE(2100000, 0, 12, 100000),
 };
 
 static const struct linear_range atc2609a_ldo_voltage_ranges1[] = {
        REGULATOR_LINEAR_RANGE(850000, 0, 15, 100000),
-       REGULATOR_LINEAR_RANGE(2100000, 16, 27, 100000),
+       REGULATOR_LINEAR_RANGE(2100000, 0, 11, 100000),
 };
 
 static const unsigned int atc260x_ldo_voltage_range_sel[] = {
-       0x0, 0x1,
+       0x0, 0x20,
 };
 
 static int atc260x_dcdc_set_voltage_time_sel(struct regulator_dev *rdev,
@@ -411,7 +411,7 @@ enum atc2609a_reg_ids {
        .owner = THIS_MODULE, \
 }
 
-#define atc2609a_reg_desc_ldo_range_pick(num, n_range) { \
+#define atc2609a_reg_desc_ldo_range_pick(num, n_range, n_volt) { \
        .name = "LDO"#num, \
        .supply_name = "ldo"#num, \
        .of_match = of_match_ptr("ldo"#num), \
@@ -421,6 +421,7 @@ enum atc2609a_reg_ids {
        .type = REGULATOR_VOLTAGE, \
        .linear_ranges = atc2609a_ldo_voltage_ranges##n_range, \
        .n_linear_ranges = ARRAY_SIZE(atc2609a_ldo_voltage_ranges##n_range), \
+       .n_voltages = n_volt, \
        .vsel_reg = ATC2609A_PMU_LDO##num##_CTL0, \
        .vsel_mask = GENMASK(4, 1), \
        .vsel_range_reg = ATC2609A_PMU_LDO##num##_CTL0, \
@@ -458,12 +459,12 @@ static const struct regulator_desc atc2609a_reg[] = {
        atc2609a_reg_desc_ldo_bypass(0),
        atc2609a_reg_desc_ldo_bypass(1),
        atc2609a_reg_desc_ldo_bypass(2),
-       atc2609a_reg_desc_ldo_range_pick(3, 0),
-       atc2609a_reg_desc_ldo_range_pick(4, 0),
+       atc2609a_reg_desc_ldo_range_pick(3, 0, 29),
+       atc2609a_reg_desc_ldo_range_pick(4, 0, 29),
        atc2609a_reg_desc_ldo(5),
-       atc2609a_reg_desc_ldo_range_pick(6, 1),
-       atc2609a_reg_desc_ldo_range_pick(7, 0),
-       atc2609a_reg_desc_ldo_range_pick(8, 0),
+       atc2609a_reg_desc_ldo_range_pick(6, 1, 28),
+       atc2609a_reg_desc_ldo_range_pick(7, 0, 29),
+       atc2609a_reg_desc_ldo_range_pick(8, 0, 29),
        atc2609a_reg_desc_ldo_fixed(9),
 };
 
index e61295b..b1eb469 100644 (file)
@@ -334,7 +334,7 @@ BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
            NULL);
 
 BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
-           regulator_map_voltage_ascend, bd718xx_set_voltage_sel_restricted,
+           regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
            regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
            NULL);
 /*
index f192bf1..e20e77e 100644 (file)
@@ -1425,6 +1425,12 @@ static int set_machine_constraints(struct regulator_dev *rdev)
         * and we have control then make sure it is enabled.
         */
        if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+               /* If we want to enable this regulator, make sure that we know
+                * the supplying regulator.
+                */
+               if (rdev->supply_name && !rdev->supply)
+                       return -EPROBE_DEFER;
+
                if (rdev->supply) {
                        ret = regulator_enable(rdev->supply);
                        if (ret < 0) {
index eb3fc1d..c4754f3 100644 (file)
@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
 
        drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
        if (IS_ERR(drvdata->dev)) {
+               ret = PTR_ERR(drvdata->dev);
                dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
-               return PTR_ERR(drvdata->dev);
+               return ret;
        }
 
        platform_set_drvdata(pdev, drvdata);
index 08cbf68..e669250 100644 (file)
@@ -280,7 +280,7 @@ static unsigned int da9121_map_mode(unsigned int mode)
        case DA9121_BUCK_MODE_FORCE_PFM:
                return REGULATOR_MODE_STANDBY;
        default:
-               return -EINVAL;
+               return REGULATOR_MODE_INVALID;
        }
 }
 
@@ -317,7 +317,7 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
 {
        struct da9121 *chip = rdev_get_drvdata(rdev);
        int id = rdev_get_id(rdev);
-       unsigned int val;
+       unsigned int val, mode;
        int ret = 0;
 
        ret = regmap_read(chip->regmap, da9121_mode_field[id].reg, &val);
@@ -326,7 +326,11 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
                return -EINVAL;
        }
 
-       return da9121_map_mode(val & da9121_mode_field[id].msk);
+       mode = da9121_map_mode(val & da9121_mode_field[id].msk);
+       if (mode == REGULATOR_MODE_INVALID)
+               return -EINVAL;
+
+       return mode;
 }
 
 static const struct regulator_ops da9121_buck_ops = {
index f3918f0..26f06f6 100644 (file)
@@ -55,7 +55,6 @@
 
 #define FAN53555_NVOLTAGES     64      /* Numbers of voltages */
 #define FAN53526_NVOLTAGES     128
-#define TCS4525_NVOLTAGES      127     /* Numbers of voltages */
 
 #define TCS_VSEL_NSEL_MASK     0x7f
 #define TCS_VSEL0_MODE         (1 << 7)
@@ -376,7 +375,7 @@ static int fan53555_voltages_setup_tcs(struct fan53555_device_info *di)
        /* Init voltage range and step */
        di->vsel_min = 600000;
        di->vsel_step = 6250;
-       di->vsel_count = TCS4525_NVOLTAGES;
+       di->vsel_count = FAN53526_NVOLTAGES;
 
        return 0;
 }
index e83eb4f..1684faf 100644 (file)
@@ -51,6 +51,7 @@ static const struct regulator_ops fan53880_ops = {
                      REGULATOR_LINEAR_RANGE(800000, 0xf, 0x73, 25000), \
                },                                                      \
                .n_linear_ranges = 2,                                   \
+               .n_voltages =      0x74,                                \
                .vsel_reg =        FAN53880_LDO ## _num ## VOUT,        \
                .vsel_mask =       0x7f,                                \
                .enable_reg =      FAN53880_ENABLE,                     \
@@ -76,6 +77,7 @@ static const struct regulator_desc fan53880_regulators[] = {
                      REGULATOR_LINEAR_RANGE(600000, 0x1f, 0xf7, 12500),
                },
                .n_linear_ranges = 2,
+               .n_voltages =      0xf8,
                .vsel_reg =        FAN53880_BUCKVOUT,
                .vsel_mask =       0x7f,
                .enable_reg =      FAN53880_ENABLE,
@@ -95,6 +97,7 @@ static const struct regulator_desc fan53880_regulators[] = {
                      REGULATOR_LINEAR_RANGE(3000000, 0x4, 0x70, 25000),
                },
                .n_linear_ranges = 2,
+               .n_voltages =      0x71,
                .vsel_reg =        FAN53880_BOOSTVOUT,
                .vsel_mask =       0x7f,
                .enable_reg =      FAN53880_ENABLE_BOOST,
index 02ad831..34e255c 100644 (file)
@@ -88,10 +88,15 @@ static int reg_domain_disable(struct regulator_dev *rdev)
 {
        struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
        struct device *dev = rdev->dev.parent;
+       int ret;
+
+       ret = dev_pm_genpd_set_performance_state(dev, 0);
+       if (ret)
+               return ret;
 
        priv->enable_counter--;
 
-       return dev_pm_genpd_set_performance_state(dev, 0);
+       return 0;
 }
 
 static int reg_is_enabled(struct regulator_dev *rdev)
index 0e16e31..ad2237a 100644 (file)
@@ -948,7 +948,7 @@ int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay)
        int ret;
        unsigned int sel;
 
-       if (!rdev->desc->n_ramp_values)
+       if (WARN_ON(!rdev->desc->n_ramp_values || !rdev->desc->ramp_delay_table))
                return -EINVAL;
 
        ret = find_closest_bigger(ramp_delay, rdev->desc->ramp_delay_table,
index f6a14e9..d6340bb 100644 (file)
@@ -3,7 +3,7 @@
 // Device driver for regulators in Hisi IC
 //
 // Copyright (c) 2013 Linaro Ltd.
-// Copyright (c) 2011 Hisilicon.
+// Copyright (c) 2011 HiSilicon Ltd.
 // Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
 //
 // Guodong Xu <guodong.xu@linaro.org>
@@ -83,7 +83,7 @@ static const unsigned int ldo34_voltages[] = {
                        .owner          = THIS_MODULE,                         \
                        .volt_table     = vtable,                              \
                        .n_voltages     = ARRAY_SIZE(vtable),                  \
-                       .vsel_mask      = (1 << (ARRAY_SIZE(vtable) - 1)) - 1, \
+                       .vsel_mask      = ARRAY_SIZE(vtable) - 1,              \
                        .vsel_reg       = vreg,                                \
                        .enable_reg     = ereg,                                \
                        .enable_mask    = emask,                               \
index ac2ee20..68cdb17 100644 (file)
@@ -2,7 +2,7 @@
 //
 // Device driver for regulators in Hi655x IC
 //
-// Copyright (c) 2016 Hisilicon.
+// Copyright (c) 2016 HiSilicon Ltd.
 //
 // Authors:
 // Chen Feng <puck.chen@hisilicon.com>
index 8d9731e..3cf8f08 100644 (file)
@@ -814,6 +814,13 @@ static int max77620_regulator_probe(struct platform_device *pdev)
        config.dev = dev;
        config.driver_data = pmic;
 
+       /*
+        * Set of_node_reuse flag to prevent driver core from attempting to
+        * claim any pinmux resources already claimed by the parent device.
+        * Otherwise PMIC driver will fail to re-probe.
+        */
+       device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
        for (id = 0; id < MAX77620_NUM_REGS; id++) {
                struct regulator_dev *rdev;
                struct regulator_desc *rdesc;
@@ -839,12 +846,10 @@ static int max77620_regulator_probe(struct platform_device *pdev)
                        return ret;
 
                rdev = devm_regulator_register(dev, rdesc, &config);
-               if (IS_ERR(rdev)) {
-                       ret = PTR_ERR(rdev);
-                       dev_err(dev, "Regulator registration %s failed: %d\n",
-                               rdesc->name, ret);
-                       return ret;
-               }
+               if (IS_ERR(rdev))
+                       return dev_err_probe(dev, PTR_ERR(rdev),
+                                            "Regulator registration %s failed\n",
+                                            rdesc->name);
        }
 
        return 0;
index 9edc349..6b8be52 100644 (file)
@@ -59,7 +59,7 @@ static const struct linear_range mt_volt_range1[] = {
        REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
 };
 
-static unsigned int mt6315_map_mode(u32 mode)
+static unsigned int mt6315_map_mode(unsigned int mode)
 {
        switch (mode) {
        case MT6315_BUCK_MODE_AUTO:
index 2055a9c..7a87788 100644 (file)
@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
        struct gpio_descs *gpios = priv->enable_gpios;
        int id = rdev_get_id(rdev), ret;
 
-       if (gpios->ndescs <= id) {
+       if (!gpios || gpios->ndescs <= id) {
                dev_warn(&rdev->dev, "no dedicated gpio can control\n");
                goto bypass_gpio;
        }
@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
        struct gpio_descs *gpios = priv->enable_gpios;
        int id = rdev_get_id(rdev);
 
-       if (gpios->ndescs <= id) {
+       if (!gpios || gpios->ndescs <= id) {
                dev_warn(&rdev->dev, "no dedicated gpio can control\n");
                goto bypass_gpio;
        }
index 852fb25..4bca64d 100644 (file)
@@ -27,6 +27,7 @@
 #define RTMV20_REG_LDIRQ       0x30
 #define RTMV20_REG_LDSTAT      0x40
 #define RTMV20_REG_LDMASK      0x50
+#define RTMV20_MAX_REGS                (RTMV20_REG_LDMASK + 1)
 
 #define RTMV20_VID_MASK                GENMASK(7, 4)
 #define RICHTEK_VID            0x80
@@ -103,9 +104,47 @@ static int rtmv20_lsw_disable(struct regulator_dev *rdev)
        return 0;
 }
 
+static int rtmv20_lsw_set_current_limit(struct regulator_dev *rdev, int min_uA,
+                                       int max_uA)
+{
+       int sel;
+
+       if (min_uA > RTMV20_LSW_MAXUA || max_uA < RTMV20_LSW_MINUA)
+               return -EINVAL;
+
+       if (max_uA > RTMV20_LSW_MAXUA)
+               max_uA = RTMV20_LSW_MAXUA;
+
+       sel = (max_uA - RTMV20_LSW_MINUA) / RTMV20_LSW_STEPUA;
+
+       /* Ensure the selected setting is still in range */
+       if ((sel * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA) < min_uA)
+               return -EINVAL;
+
+       sel <<= ffs(rdev->desc->csel_mask) - 1;
+
+       return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg,
+                                 rdev->desc->csel_mask, sel);
+}
+
+static int rtmv20_lsw_get_current_limit(struct regulator_dev *rdev)
+{
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
+       if (ret)
+               return ret;
+
+       val &= rdev->desc->csel_mask;
+       val >>= ffs(rdev->desc->csel_mask) - 1;
+
+       return val * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA;
+}
+
 static const struct regulator_ops rtmv20_regulator_ops = {
-       .set_current_limit = regulator_set_current_limit_regmap,
-       .get_current_limit = regulator_get_current_limit_regmap,
+       .set_current_limit = rtmv20_lsw_set_current_limit,
+       .get_current_limit = rtmv20_lsw_get_current_limit,
        .enable = rtmv20_lsw_enable,
        .disable = rtmv20_lsw_disable,
        .is_enabled = regulator_is_enabled_regmap,
@@ -275,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
        .val_bits = 8,
        .cache_type = REGCACHE_RBTREE,
        .max_register = RTMV20_REG_LDMASK,
+       .num_reg_defaults_raw = RTMV20_MAX_REGS,
 
        .writeable_reg = rtmv20_is_accessible_reg,
        .readable_reg = rtmv20_is_accessible_reg,
index bbadf72..1f02f60 100644 (file)
@@ -173,7 +173,7 @@ scmi_config_linear_regulator_mappings(struct scmi_regulator *sreg,
                sreg->desc.uV_step =
                        vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_STEP];
                sreg->desc.linear_min_sel = 0;
-               sreg->desc.n_voltages = delta_uV / sreg->desc.uV_step;
+               sreg->desc.n_voltages = (delta_uV / sreg->desc.uV_step) + 1;
                sreg->desc.ops = &scmi_reg_linear_ops;
        }
 
index ecefc25..337353c 100644 (file)
@@ -135,12 +135,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
 {
        struct ap_queue_status status;
        struct ap_message *ap_msg;
+       bool found = false;
 
        status = ap_dqap(aq->qid, &aq->reply->psmid,
                         aq->reply->msg, aq->reply->len);
        switch (status.response_code) {
        case AP_RESPONSE_NORMAL:
-               aq->queue_count--;
+               aq->queue_count = max_t(int, 0, aq->queue_count - 1);
                if (aq->queue_count > 0)
                        mod_timer(&aq->timeout,
                                  jiffies + aq->request_timeout);
@@ -150,8 +151,14 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
                        list_del_init(&ap_msg->list);
                        aq->pendingq_count--;
                        ap_msg->receive(aq, ap_msg, aq->reply);
+                       found = true;
                        break;
                }
+               if (!found) {
+                       AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
+                                   __func__, aq->reply->psmid,
+                                   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+               }
                fallthrough;
        case AP_RESPONSE_NO_PENDING_REPLY:
                if (!status.queue_empty || aq->queue_count <= 0)
@@ -232,7 +239,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
                           ap_msg->flags & AP_MSG_FLAG_SPECIAL);
        switch (status.response_code) {
        case AP_RESPONSE_NORMAL:
-               aq->queue_count++;
+               aq->queue_count = max_t(int, 1, aq->queue_count + 1);
                if (aq->queue_count == 1)
                        mod_timer(&aq->timeout, jiffies + aq->request_timeout);
                list_move_tail(&ap_msg->list, &aq->pendingq);
index 697c09e..cd52664 100644 (file)
@@ -254,12 +254,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
        device_enable_async_suspend(&shost->shost_dev);
 
+       get_device(&shost->shost_gendev);
        error = device_add(&shost->shost_dev);
        if (error)
                goto out_del_gendev;
 
-       get_device(&shost->shost_gendev);
-
        if (shost->transportt->host_size) {
                shost->shost_data = kzalloc(shost->transportt->host_size,
                                         GFP_KERNEL);
@@ -278,33 +277,36 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
                if (!shost->work_q) {
                        error = -EINVAL;
-                       goto out_free_shost_data;
+                       goto out_del_dev;
                }
        }
 
        error = scsi_sysfs_add_host(shost);
        if (error)
-               goto out_destroy_host;
+               goto out_del_dev;
 
        scsi_proc_host_add(shost);
        scsi_autopm_put_host(shost);
        return error;
 
- out_destroy_host:
-       if (shost->work_q)
-               destroy_workqueue(shost->work_q);
- out_free_shost_data:
-       kfree(shost->shost_data);
+       /*
+        * Any host allocation in this function will be freed in
+        * scsi_host_dev_release().
+        */
  out_del_dev:
        device_del(&shost->shost_dev);
  out_del_gendev:
+       /*
+        * Host state is SHOST_RUNNING so we have to explicitly release
+        * ->shost_dev.
+        */
+       put_device(&shost->shost_dev);
        device_del(&shost->shost_gendev);
  out_disable_runtime_pm:
        device_disable_async_suspend(&shost->shost_gendev);
        pm_runtime_disable(&shost->shost_gendev);
        pm_runtime_set_suspended(&shost->shost_gendev);
        pm_runtime_put_noidle(&shost->shost_gendev);
-       scsi_mq_destroy_tags(shost);
  fail:
        return error;
 }
@@ -345,7 +347,7 @@ static void scsi_host_dev_release(struct device *dev)
 
        ida_simple_remove(&host_index_ida, shost->host_no);
 
-       if (parent)
+       if (shost->shost_state != SHOST_CREATED)
                put_device(parent);
        kfree(shost);
 }
@@ -388,8 +390,10 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        mutex_init(&shost->scan_mutex);
 
        index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
-       if (index < 0)
-               goto fail_kfree;
+       if (index < 0) {
+               kfree(shost);
+               return NULL;
+       }
        shost->host_no = index;
 
        shost->dma_channel = 0xff;
@@ -481,7 +485,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
                shost_printk(KERN_WARNING, shost,
                        "error handler thread failed to spawn, error = %ld\n",
                        PTR_ERR(shost->ehandler));
-               goto fail_index_remove;
+               goto fail;
        }
 
        shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
@@ -490,17 +494,18 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        if (!shost->tmf_work_q) {
                shost_printk(KERN_WARNING, shost,
                             "failed to create tmf workq\n");
-               goto fail_kthread;
+               goto fail;
        }
        scsi_proc_hostdir_add(shost->hostt);
        return shost;
+ fail:
+       /*
+        * Host state is still SHOST_CREATED and that is enough to release
+        * ->shost_gendev. scsi_host_dev_release() will free
+        * dev_name(&shost->shost_dev).
+        */
+       put_device(&shost->shost_gendev);
 
- fail_kthread:
-       kthread_stop(shost->ehandler);
- fail_index_remove:
-       ida_simple_remove(&host_index_ida, shost->host_no);
- fail_kfree:
-       kfree(shost);
        return NULL;
 }
 EXPORT_SYMBOL(scsi_host_alloc);
index 573c859..fc3682f 100644 (file)
@@ -20589,10 +20589,8 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        abtswqe = &abtsiocb->wqe;
        memset(abtswqe, 0, sizeof(*abtswqe));
 
-       if (lpfc_is_link_up(phba))
+       if (!lpfc_is_link_up(phba))
                bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
-       else
-               bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0);
        bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
        abtswqe->abort_cmd.rsrvd5 = 0;
        abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
index 7562311..b92570a 100644 (file)
@@ -1827,22 +1827,20 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
                fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
                QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
                           "WWPN (0x%s) already exists.\n", buf);
-               goto err1;
+               return rc;
        }
 
        if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
                QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
                           "because link is not up.\n");
-               rc = -EIO;
-               goto err1;
+               return -EIO;
        }
 
        vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
        if (!vn_port) {
                QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
                           "for vport.\n");
-               rc = -ENOMEM;
-               goto err1;
+               return -ENOMEM;
        }
 
        fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
@@ -1866,7 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
        if (rc) {
                QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
                    "for lport stats.\n");
-               goto err2;
+               goto err;
        }
 
        fc_set_wwnn(vn_port, vport->node_name);
@@ -1884,7 +1882,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
        if (rc) {
                QEDF_WARN(&base_qedf->dbg_ctx,
                          "Error adding Scsi_Host rc=0x%x.\n", rc);
-               goto err2;
+               goto err;
        }
 
        /* Set default dev_loss_tmo based on module parameter */
@@ -1925,9 +1923,10 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
        vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
        vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
 
-err2:
+       return 0;
+
+err:
        scsi_host_put(vn_port->host);
-err1:
        return rc;
 }
 
@@ -1968,8 +1967,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
        fc_lport_free_stats(vn_port);
 
        /* Release Scsi_Host */
-       if (vn_port->host)
-               scsi_host_put(vn_port->host);
+       scsi_host_put(vn_port->host);
 
 out:
        return 0;
index d92cec1..d33355a 100644 (file)
@@ -184,6 +184,7 @@ static struct {
        {"HP", "C3323-300", "4269", BLIST_NOTQ},
        {"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
        {"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+       {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
        {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
        {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
        {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
index aee3cfc..0a84ec9 100644 (file)
@@ -603,11 +603,23 @@ static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
 
        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
        if (!ret) {
-               if (ver >= UFS_UNIPRO_VER_1_8)
+               if (ver >= UFS_UNIPRO_VER_1_8) {
                        host->hw_ver.major = 3;
+                       /*
+                        * Fix HCI version for some platforms with
+                        * incorrect version
+                        */
+                       if (hba->ufs_version < ufshci_version(3, 0))
+                               hba->ufs_version = ufshci_version(3, 0);
+               }
        }
 }
 
+static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
+{
+       return hba->ufs_version;
+}
+
 /**
  * ufs_mtk_init - find other essential mmio bases
  * @hba: host controller instance
@@ -1048,6 +1060,7 @@ static void ufs_mtk_event_notify(struct ufs_hba *hba,
 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
        .name                = "mediatek.ufshci",
        .init                = ufs_mtk_init,
+       .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
        .setup_clocks        = ufs_mtk_setup_clocks,
        .hce_enable_notify   = ufs_mtk_hce_enable_notify,
        .link_startup_notify = ufs_mtk_link_startup_notify,
index e195747..6dd1902 100644 (file)
@@ -626,10 +626,8 @@ static int meson_msr_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(base)) {
-               dev_err(&pdev->dev, "io resource mapping failed\n");
+       if (IS_ERR(base))
                return PTR_ERR(base);
-       }
 
        priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
                                             &meson_clk_msr_regmap_config);
index 8965fe6..fe40626 100644 (file)
@@ -68,7 +68,7 @@
 #define BCM2835_SPI_FIFO_SIZE          64
 #define BCM2835_SPI_FIFO_SIZE_3_4      48
 #define BCM2835_SPI_DMA_MIN_LENGTH     96
-#define BCM2835_SPI_NUM_CS               /* raise as necessary */
+#define BCM2835_SPI_NUM_CS             24  /* raise as necessary */
 #define BCM2835_SPI_MODE_BITS  (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
                                | SPI_NO_CS | SPI_3WIRE)
 
@@ -1195,6 +1195,12 @@ static int bcm2835_spi_setup(struct spi_device *spi)
        struct gpio_chip *chip;
        u32 cs;
 
+       if (spi->chip_select >= BCM2835_SPI_NUM_CS) {
+               dev_err(&spi->dev, "only %d chip-selects supported\n",
+                       BCM2835_SPI_NUM_CS - 1);
+               return -EINVAL;
+       }
+
        /*
         * Precalculate SPI slave's CS register value for ->prepare_message():
         * The driver always uses software-controlled GPIO chip select, hence
@@ -1288,7 +1294,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
        ctlr->use_gpio_descriptors = true;
        ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
        ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
-       ctlr->num_chipselect = BCM2835_SPI_NUM_CS;
+       ctlr->num_chipselect = 3;
        ctlr->setup = bcm2835_spi_setup;
        ctlr->transfer_one = bcm2835_spi_transfer_one;
        ctlr->handle_err = bcm2835_spi_handle_err;
index 6a6af85..27d0087 100644 (file)
@@ -184,6 +184,8 @@ int spi_bitbang_setup(struct spi_device *spi)
 {
        struct spi_bitbang_cs   *cs = spi->controller_state;
        struct spi_bitbang      *bitbang;
+       bool                    initial_setup = false;
+       int                     retval;
 
        bitbang = spi_master_get_devdata(spi->master);
 
@@ -192,22 +194,30 @@ int spi_bitbang_setup(struct spi_device *spi)
                if (!cs)
                        return -ENOMEM;
                spi->controller_state = cs;
+               initial_setup = true;
        }
 
        /* per-word shift register access, in hardware or bitbanging */
        cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
-       if (!cs->txrx_word)
-               return -EINVAL;
+       if (!cs->txrx_word) {
+               retval = -EINVAL;
+               goto err_free;
+       }
 
        if (bitbang->setup_transfer) {
-               int retval = bitbang->setup_transfer(spi, NULL);
+               retval = bitbang->setup_transfer(spi, NULL);
                if (retval < 0)
-                       return retval;
+                       goto err_free;
        }
 
        dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
 
        return 0;
+
+err_free:
+       if (initial_setup)
+               kfree(cs);
+       return retval;
 }
 EXPORT_SYMBOL_GPL(spi_bitbang_setup);
 
index d0e5aa1..bdf94cc 100644 (file)
@@ -440,6 +440,7 @@ static int fsl_spi_setup(struct spi_device *spi)
 {
        struct mpc8xxx_spi *mpc8xxx_spi;
        struct fsl_spi_reg __iomem *reg_base;
+       bool initial_setup = false;
        int retval;
        u32 hw_mode;
        struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
@@ -452,6 +453,7 @@ static int fsl_spi_setup(struct spi_device *spi)
                if (!cs)
                        return -ENOMEM;
                spi_set_ctldata(spi, cs);
+               initial_setup = true;
        }
        mpc8xxx_spi = spi_master_get_devdata(spi->master);
 
@@ -475,6 +477,8 @@ static int fsl_spi_setup(struct spi_device *spi)
        retval = fsl_spi_setup_transfer(spi, NULL);
        if (retval < 0) {
                cs->hw_mode = hw_mode; /* Restore settings */
+               if (initial_setup)
+                       kfree(cs);
                return retval;
        }
 
index 71402f7..df28c66 100644 (file)
@@ -424,15 +424,22 @@ done:
 static int uwire_setup(struct spi_device *spi)
 {
        struct uwire_state *ust = spi->controller_state;
+       bool initial_setup = false;
+       int status;
 
        if (ust == NULL) {
                ust = kzalloc(sizeof(*ust), GFP_KERNEL);
                if (ust == NULL)
                        return -ENOMEM;
                spi->controller_state = ust;
+               initial_setup = true;
        }
 
-       return uwire_setup_transfer(spi, NULL);
+       status = uwire_setup_transfer(spi, NULL);
+       if (status && initial_setup)
+               kfree(ust);
+
+       return status;
 }
 
 static void uwire_cleanup(struct spi_device *spi)
index 999c227..ede7f05 100644 (file)
@@ -1032,8 +1032,22 @@ static void omap2_mcspi_release_dma(struct spi_master *master)
        }
 }
 
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+       struct omap2_mcspi_cs   *cs;
+
+       if (spi->controller_state) {
+               /* Unlink controller state from context save list */
+               cs = spi->controller_state;
+               list_del(&cs->node);
+
+               kfree(cs);
+       }
+}
+
 static int omap2_mcspi_setup(struct spi_device *spi)
 {
+       bool                    initial_setup = false;
        int                     ret;
        struct omap2_mcspi      *mcspi = spi_master_get_devdata(spi->master);
        struct omap2_mcspi_regs *ctx = &mcspi->ctx;
@@ -1051,35 +1065,28 @@ static int omap2_mcspi_setup(struct spi_device *spi)
                spi->controller_state = cs;
                /* Link this to context save list */
                list_add_tail(&cs->node, &ctx->cs);
+               initial_setup = true;
        }
 
        ret = pm_runtime_get_sync(mcspi->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(mcspi->dev);
+               if (initial_setup)
+                       omap2_mcspi_cleanup(spi);
 
                return ret;
        }
 
        ret = omap2_mcspi_setup_transfer(spi, NULL);
+       if (ret && initial_setup)
+               omap2_mcspi_cleanup(spi);
+
        pm_runtime_mark_last_busy(mcspi->dev);
        pm_runtime_put_autosuspend(mcspi->dev);
 
        return ret;
 }
 
-static void omap2_mcspi_cleanup(struct spi_device *spi)
-{
-       struct omap2_mcspi_cs   *cs;
-
-       if (spi->controller_state) {
-               /* Unlink controller state from context save list */
-               cs = spi->controller_state;
-               list_del(&cs->node);
-
-               kfree(cs);
-       }
-}
-
 static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
 {
        struct omap2_mcspi *mcspi = data;
index 5e59ba0..8ee0cc0 100644 (file)
@@ -1254,6 +1254,8 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
                chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
 
                err = gpiod_direction_output(gpiod, !chip->gpio_cs_inverted);
+               if (err)
+                       gpiod_put(chip->gpiod_cs);
        }
 
        return err;
@@ -1267,6 +1269,7 @@ static int setup(struct spi_device *spi)
        struct driver_data *drv_data =
                spi_controller_get_devdata(spi->controller);
        uint tx_thres, tx_hi_thres, rx_thres;
+       int err;
 
        switch (drv_data->ssp_type) {
        case QUARK_X1000_SSP:
@@ -1413,7 +1416,11 @@ static int setup(struct spi_device *spi)
        if (drv_data->ssp_type == CE4100_SSP)
                return 0;
 
-       return setup_cs(spi, chip, chip_info);
+       err = setup_cs(spi, chip, chip_info);
+       if (err)
+               kfree(chip);
+
+       return err;
 }
 
 static void cleanup(struct spi_device *spi)
index 7e640cc..594f641 100644 (file)
@@ -294,7 +294,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
        int err = 0;
 
        if (!op->data.nbytes)
-               return stm32_qspi_wait_nobusy(qspi);
+               goto wait_nobusy;
 
        if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
                goto out;
@@ -315,6 +315,9 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
 out:
        /* clear flags */
        writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
+wait_nobusy:
+       if (!err)
+               err = stm32_qspi_wait_nobusy(qspi);
 
        return err;
 }
index 5a3d81c..9262c64 100644 (file)
@@ -678,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
        xqspi->irq = platform_get_irq(pdev, 0);
        if (xqspi->irq <= 0) {
                ret = -ENXIO;
-               goto remove_master;
+               goto clk_dis_all;
        }
        ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
                               0, pdev->name, xqspi);
        if (ret != 0) {
                ret = -ENXIO;
                dev_err(&pdev->dev, "request_irq failed\n");
-               goto remove_master;
+               goto clk_dis_all;
        }
 
        ret = of_property_read_u32(np, "num-cs",
@@ -693,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
        if (ret < 0) {
                ctlr->num_chipselect = 1;
        } else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
+               ret = -EINVAL;
                dev_err(&pdev->dev, "only 2 chip selects are available\n");
-               goto remove_master;
+               goto clk_dis_all;
        } else {
                ctlr->num_chipselect = num_cs;
        }
index 33e28cc..b5229bc 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
- *  Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
  *  GDMA4740 DMAC support
  */
 
@@ -914,6 +913,5 @@ static struct platform_driver gdma_dma_driver = {
 };
 module_platform_driver(gdma_dma_driver);
 
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("Ralink/MTK DMA driver");
 MODULE_LICENSE("GPL v2");
index a6d731e..4378592 100644 (file)
@@ -2091,7 +2091,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame,
        struct net_device *ndev = padapter->pnetdev;
 
        {
-               struct station_info sinfo;
+               struct station_info sinfo = {};
                u8 ie_offset;
                if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
                        ie_offset = _ASOCREQ_IE_OFFSET_;
@@ -2284,7 +2284,7 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
        mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
        mon_ndev->ieee80211_ptr = mon_wdev;
 
-       ret = register_netdevice(mon_ndev);
+       ret = cfg80211_register_netdevice(mon_ndev);
        if (ret) {
                goto out;
        }
@@ -2360,7 +2360,7 @@ static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
        adapter = rtw_netdev_priv(ndev);
        pwdev_priv = adapter_wdev_data(adapter);
 
-       unregister_netdevice(ndev);
+       cfg80211_unregister_netdevice(ndev);
 
        if (ndev == pwdev_priv->pmon_ndev) {
                pwdev_priv->pmon_ndev = NULL;
index 05d7ffd..7e35edd 100644 (file)
@@ -3121,9 +3121,7 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
 {
-
-       assert_spin_locked(&cmd->t_state_lock);
-       WARN_ON_ONCE(!irqs_disabled());
+       lockdep_assert_held(&cmd->t_state_lock);
 
        if (fabric_stop)
                cmd->transport_state |= CMD_T_FABRIC_STOP;
index 6132cc8..6e6eb83 100644 (file)
@@ -220,6 +220,7 @@ int optee_open_session(struct tee_context *ctx,
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess = NULL;
+       uuid_t client_uuid;
 
        /* +2 for the meta parameters added below */
        shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
@@ -240,10 +241,11 @@ int optee_open_session(struct tee_context *ctx,
        memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
        msg_arg->params[1].u.value.c = arg->clnt_login;
 
-       rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
-                                         arg->clnt_login, arg->clnt_uuid);
+       rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
+                                         arg->clnt_uuid);
        if (rc)
                goto out;
+       export_uuid(msg_arg->params[1].u.octets, &client_uuid);
 
        rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
        if (rc)
index 81ff593..e3d72d0 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/types.h>
 
 /*
- * This file defines the OP-TEE message protocol used to communicate
+ * This file defines the OP-TEE message protocol (ABI) used to communicate
  * with an instance of OP-TEE running in secure world.
  *
  * This file is divided into two sections.
@@ -144,9 +144,10 @@ struct optee_msg_param_value {
  * @tmem:      parameter by temporary memory reference
  * @rmem:      parameter by registered memory reference
  * @value:     parameter by opaque value
+ * @octets:    parameter by octet string
  *
  * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
- * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
+ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
  * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
  * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
  * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
@@ -157,6 +158,7 @@ struct optee_msg_param {
                struct optee_msg_param_tmem tmem;
                struct optee_msg_param_rmem rmem;
                struct optee_msg_param_value value;
+               u8 octets[24];
        } u;
 };
 
index f8e8825..99abdc0 100644 (file)
@@ -621,6 +621,17 @@ bool x86_thermal_enabled(void)
        return atomic_read(&therm_throt_en);
 }
 
+void __init therm_lvt_init(void)
+{
+       /*
+        * This function is only called on boot CPU. Save the init thermal
+        * LVT value on BSP and use that value to restore APs' thermal LVT
+        * entry BIOS programmed later
+        */
+       if (intel_thermal_supported(&boot_cpu_data))
+               lvtthmr_init = apic_read(APIC_LVTTHMR);
+}
+
 void intel_init_thermal(struct cpuinfo_x86 *c)
 {
        unsigned int cpu = smp_processor_id();
@@ -630,10 +641,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
        if (!intel_thermal_supported(c))
                return;
 
-       /* On the BSP? */
-       if (c == &boot_cpu_data)
-               lvtthmr_init = apic_read(APIC_LVTTHMR);
-
        /*
         * First check if its enabled already, in which case there might
         * be some SMM goo which handles it, so we can't even put a handler
index 2f49c58..bd4e9f6 100644 (file)
@@ -553,7 +553,11 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev)
 {
        struct exar8250 *priv = pci_get_drvdata(pcidev);
        struct uart_8250_port *port = serial8250_get_port(priv->line[0]);
-       struct platform_device *pdev = port->port.private_data;
+       struct platform_device *pdev;
+
+       pdev = port->port.private_data;
+       if (!pdev)
+               return;
 
        device_remove_software_node(&pdev->dev);
        platform_device_unregister(pdev);
index a8b7b50..5281f8d 100644 (file)
@@ -2007,7 +2007,7 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
                else
                        mask = BIT(priv_ep->num);
 
-               if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+               if (priv_ep->type != USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir) {
                        cdns3_set_register_bit(&regs->tdl_from_trb, mask);
                        cdns3_set_register_bit(&regs->tdl_beh, mask);
                        cdns3_set_register_bit(&regs->tdl_beh2, mask);
@@ -2046,15 +2046,13 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
        case USB_ENDPOINT_XFER_INT:
                ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
 
-               if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
-                   priv_dev->dev_ver > DEV_VER_V2)
+               if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
                        ep_cfg |= EP_CFG_TDL_CHK;
                break;
        case USB_ENDPOINT_XFER_BULK:
                ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
 
-               if ((priv_dev->dev_ver == DEV_VER_V2  && !priv_ep->dir) ||
-                   priv_dev->dev_ver > DEV_VER_V2)
+               if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
                        ep_cfg |= EP_CFG_TDL_CHK;
                break;
        default:
index 5f0513c..6897274 100644 (file)
@@ -1517,13 +1517,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
 {
        struct cdnsp_device *pdev = (struct cdnsp_device *)data;
        union cdnsp_trb *event_ring_deq;
+       unsigned long flags;
        int counter = 0;
 
-       spin_lock(&pdev->lock);
+       spin_lock_irqsave(&pdev->lock, flags);
 
        if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
                cdnsp_died(pdev);
-               spin_unlock(&pdev->lock);
+               spin_unlock_irqrestore(&pdev->lock, flags);
                return IRQ_HANDLED;
        }
 
@@ -1539,7 +1540,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
 
        cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
 
-       spin_unlock(&pdev->lock);
+       spin_unlock_irqrestore(&pdev->lock, flags);
 
        return IRQ_HANDLED;
 }
index 4545b23..bac0f54 100644 (file)
@@ -686,6 +686,16 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
        int val;
        unsigned long flags;
 
+       /* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK required by BC 1.2 spec */
+       spin_lock_irqsave(&usbmisc->lock, flags);
+       val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+       val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
+       writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+       spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+       /* TVDMSRC_DIS */
+       msleep(20);
+
        /* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
        spin_lock_irqsave(&usbmisc->lock, flags);
        val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
@@ -695,7 +705,8 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
                                usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
        spin_unlock_irqrestore(&usbmisc->lock, flags);
 
-       usleep_range(1000, 2000);
+       /* TVDMSRC_ON */
+       msleep(40);
 
        /*
         * Per BC 1.2, check voltage of D+:
@@ -798,7 +809,8 @@ static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
                                usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
        spin_unlock_irqrestore(&usbmisc->lock, flags);
 
-       usleep_range(1000, 2000);
+       /* TVDPSRC_ON */
+       msleep(40);
 
        /* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
        val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
index fc7d6cd..df8e69e 100644 (file)
@@ -41,6 +41,8 @@
 #define USB_VENDOR_GENESYS_LOGIC               0x05e3
 #define USB_VENDOR_SMSC                                0x0424
 #define USB_PRODUCT_USB5534B                   0x5534
+#define USB_VENDOR_CYPRESS                     0x04b4
+#define USB_PRODUCT_CY7C65632                  0x6570
 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND       0x01
 #define HUB_QUIRK_DISABLE_AUTOSUSPEND          0x02
 
@@ -5697,6 +5699,11 @@ static const struct usb_device_id hub_id_table[] = {
       .idProduct = USB_PRODUCT_USB5534B,
       .bInterfaceClass = USB_CLASS_HUB,
       .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                   | USB_DEVICE_ID_MATCH_PRODUCT,
+      .idVendor = USB_VENDOR_CYPRESS,
+      .idProduct = USB_PRODUCT_CY7C65632,
+      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
     { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
                        | USB_DEVICE_ID_MATCH_INT_CLASS,
       .idVendor = USB_VENDOR_GENESYS_LOGIC,
index b6e53d8..4ac397e 100644 (file)
@@ -1671,8 +1671,8 @@ static int dwc3_remove(struct platform_device *pdev)
 
        pm_runtime_get_sync(&pdev->dev);
 
-       dwc3_debugfs_exit(dwc);
        dwc3_core_exit_mode(dwc);
+       dwc3_debugfs_exit(dwc);
 
        dwc3_core_exit(dwc);
        dwc3_ulpi_exit(dwc);
@@ -1690,11 +1690,6 @@ static int dwc3_remove(struct platform_device *pdev)
        return 0;
 }
 
-static void dwc3_shutdown(struct platform_device *pdev)
-{
-       dwc3_remove(pdev);
-}
-
 #ifdef CONFIG_PM
 static int dwc3_core_init_for_resume(struct dwc3 *dwc)
 {
@@ -2012,7 +2007,6 @@ MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
 static struct platform_driver dwc3_driver = {
        .probe          = dwc3_probe,
        .remove         = dwc3_remove,
-       .shutdown   = dwc3_shutdown,
        .driver         = {
                .name   = "dwc3",
                .of_match_table = of_match_ptr(of_dwc3_match),
index d0ac89c..d223c54 100644 (file)
@@ -413,9 +413,12 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
 
 
 #ifdef CONFIG_DEBUG_FS
+extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
 extern void dwc3_debugfs_init(struct dwc3 *d);
 extern void dwc3_debugfs_exit(struct dwc3 *d);
 #else
+static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+{  }
 static inline void dwc3_debugfs_init(struct dwc3 *d)
 {  }
 static inline void dwc3_debugfs_exit(struct dwc3 *d)
index 7146ee2..5dbbe53 100644 (file)
@@ -886,30 +886,14 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
        }
 }
 
-static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
-               struct dentry *parent)
+void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
 {
        struct dentry           *dir;
 
-       dir = debugfs_create_dir(dep->name, parent);
+       dir = debugfs_create_dir(dep->name, dep->dwc->root);
        dwc3_debugfs_create_endpoint_files(dep, dir);
 }
 
-static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
-               struct dentry *parent)
-{
-       int                     i;
-
-       for (i = 0; i < dwc->num_eps; i++) {
-               struct dwc3_ep  *dep = dwc->eps[i];
-
-               if (!dep)
-                       continue;
-
-               dwc3_debugfs_create_endpoint_dir(dep, parent);
-       }
-}
-
 void dwc3_debugfs_init(struct dwc3 *dwc)
 {
        struct dentry           *root;
@@ -940,7 +924,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
                                &dwc3_testmode_fops);
                debugfs_create_file("link_state", 0644, root, dwc,
                                    &dwc3_link_state_fops);
-               dwc3_debugfs_create_endpoint_dirs(dwc, root);
        }
 }
 
index bdf1f98..ffe301d 100644 (file)
@@ -651,7 +651,7 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
                return PTR_ERR(priv->usb_glue_regmap);
 
        /* Create a regmap for each USB2 PHY control register set */
-       for (i = 0; i < priv->usb2_ports; i++) {
+       for (i = 0; i < priv->drvdata->num_phys; i++) {
                struct regmap_config u2p_regmap_config = {
                        .reg_bits = 8,
                        .val_bits = 32,
@@ -659,6 +659,9 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
                        .max_register = U2P_R1,
                };
 
+               if (!strstr(priv->drvdata->phy_names[i], "usb2"))
+                       continue;
+
                u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
                                                        "u2p-%d", i);
                if (!u2p_regmap_config.name)
@@ -772,13 +775,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
 
        ret = priv->drvdata->usb_init(priv);
        if (ret)
-               goto err_disable_clks;
+               goto err_disable_regulator;
 
        /* Init PHYs */
        for (i = 0 ; i < PHY_COUNT ; ++i) {
                ret = phy_init(priv->phys[i]);
                if (ret)
-                       goto err_disable_clks;
+                       goto err_disable_regulator;
        }
 
        /* Set PHY Power */
@@ -816,6 +819,10 @@ err_phys_exit:
        for (i = 0 ; i < PHY_COUNT ; ++i)
                phy_exit(priv->phys[i]);
 
+err_disable_regulator:
+       if (priv->vbus)
+               regulator_disable(priv->vbus);
+
 err_disable_clks:
        clk_bulk_disable_unprepare(priv->drvdata->num_clks,
                                   priv->drvdata->clks);
index 8b668ef..3cd2942 100644 (file)
@@ -292,6 +292,9 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
                epnum |= 1;
 
        dep = dwc->eps[epnum];
+       if (dep == NULL)
+               return NULL;
+
        if (dep->flags & DWC3_EP_ENABLED)
                return dep;
 
index 612825a..f14c2aa 100644 (file)
@@ -2261,13 +2261,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
        }
 
        /*
-        * Synchronize any pending event handling before executing the controller
-        * halt routine.
+        * Synchronize and disable any further event handling while controller
+        * is being enabled/disabled.
         */
-       if (!is_on) {
-               dwc3_gadget_disable_irq(dwc);
-               synchronize_irq(dwc->irq_gadget);
-       }
+       disable_irq(dwc->irq_gadget);
 
        spin_lock_irqsave(&dwc->lock, flags);
 
@@ -2305,6 +2302,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
        ret = dwc3_gadget_run_stop(dwc, is_on, false);
        spin_unlock_irqrestore(&dwc->lock, flags);
+       enable_irq(dwc->irq_gadget);
+
        pm_runtime_put(dwc->dev);
 
        return ret;
@@ -2754,6 +2753,8 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
        INIT_LIST_HEAD(&dep->started_list);
        INIT_LIST_HEAD(&dep->cancelled_list);
 
+       dwc3_debugfs_create_endpoint_dir(dep);
+
        return 0;
 }
 
@@ -2797,6 +2798,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
                        list_del(&dep->endpoint.ep_list);
                }
 
+               debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
                kfree(dep);
        }
 }
@@ -4046,6 +4048,7 @@ err5:
        dwc3_gadget_free_endpoints(dwc);
 err4:
        usb_put_gadget(dwc->gadget);
+       dwc->gadget = NULL;
 err3:
        dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
                        dwc->bounce_addr);
@@ -4065,6 +4068,9 @@ err0:
 
 void dwc3_gadget_exit(struct dwc3 *dwc)
 {
+       if (!dwc->gadget)
+               return;
+
        usb_del_gadget(dwc->gadget);
        dwc3_gadget_free_endpoints(dwc);
        usb_put_gadget(dwc->gadget);
index 8bb2577..0550760 100644 (file)
@@ -164,6 +164,14 @@ int usb_assign_descriptors(struct usb_function *f,
 {
        struct usb_gadget *g = f->config->cdev->gadget;
 
+       /* super-speed-plus descriptor falls back to super-speed one,
+        * if such a descriptor was provided, thus avoiding a NULL
+        * pointer dereference if a 5gbps capable gadget is used with
+        * a 10gbps capable config (device port + cable + host port)
+        */
+       if (!ssp)
+               ssp = ss;
+
        if (fs) {
                f->fs_descriptors = usb_copy_descriptors(fs);
                if (!f->fs_descriptors)
index 7f5cf48..ffe2486 100644 (file)
@@ -791,7 +791,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
                fs_ecm_notify_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
-                       ecm_ss_function, NULL);
+                       ecm_ss_function, ecm_ss_function);
        if (status)
                goto fail;
 
index cfcc4e8..2cd9942 100644 (file)
@@ -302,7 +302,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
        eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function,
-                       eem_ss_function, NULL);
+                       eem_ss_function, eem_ss_function);
        if (status)
                goto fail;
 
@@ -495,7 +495,7 @@ static int eem_unwrap(struct gether *port,
                        skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (unlikely(!skb2)) {
                                DBG(cdev, "unable to unframe EEM packet\n");
-                               continue;
+                               goto next;
                        }
                        skb_trim(skb2, len - ETH_FCS_LEN);
 
@@ -505,7 +505,7 @@ static int eem_unwrap(struct gether *port,
                                                GFP_ATOMIC);
                        if (unlikely(!skb3)) {
                                dev_kfree_skb_any(skb2);
-                               continue;
+                               goto next;
                        }
                        dev_kfree_skb_any(skb2);
                        skb_queue_tail(list, skb3);
index bf10919..d4844af 100644 (file)
@@ -3567,6 +3567,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
                ffs->func = NULL;
        }
 
+       /* Drain any pending AIO completions */
+       drain_workqueue(ffs->io_completion_wq);
+
        if (!--opts->refcnt)
                functionfs_unbind(ffs);
 
index 1125f47..e556993 100644 (file)
@@ -802,7 +802,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
                hidg_fs_out_ep_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, hidg_fs_descriptors,
-                       hidg_hs_descriptors, hidg_ss_descriptors, NULL);
+                       hidg_hs_descriptors, hidg_ss_descriptors,
+                       hidg_ss_descriptors);
        if (status)
                goto fail;
 
index b56ad7c..ae41f55 100644 (file)
@@ -207,7 +207,7 @@ autoconf_fail:
        ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs,
-                       ss_loopback_descs, NULL);
+                       ss_loopback_descs, ss_loopback_descs);
        if (ret)
                return ret;
 
index 019bea8..8551272 100644 (file)
@@ -583,7 +583,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
                data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
                data[1] = data[0];
 
-               DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
+               DBG(cdev, "notify speed %u\n", ncm_bitrate(cdev->gadget));
                ncm->notify_state = NCM_NOTIFY_CONNECT;
                break;
        }
@@ -1101,11 +1101,11 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
                        ncm->ndp_dgram_count = 1;
 
                        /* Note: we skip opts->next_ndp_index */
-               }
 
-               /* Delay the timer. */
-               hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
-                             HRTIMER_MODE_REL_SOFT);
+                       /* Start the timer. */
+                       hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
+                                     HRTIMER_MODE_REL_SOFT);
+               }
 
                /* Add the datagram position entries */
                ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
index f47fdc1..59d382f 100644 (file)
@@ -1101,7 +1101,8 @@ autoconf_fail:
        ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, fs_printer_function,
-                       hs_printer_function, ss_printer_function, NULL);
+                       hs_printer_function, ss_printer_function,
+                       ss_printer_function);
        if (ret)
                return ret;
 
index 0739b05..ee95e8f 100644 (file)
@@ -789,7 +789,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
        ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
-                       eth_ss_function, NULL);
+                       eth_ss_function, eth_ss_function);
        if (status)
                goto fail;
 
index e627138..1ed8ff0 100644 (file)
@@ -233,7 +233,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
        gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
-                       gser_ss_function, NULL);
+                       gser_ss_function, gser_ss_function);
        if (status)
                goto fail;
        dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
index 5a201ba..1abf08e 100644 (file)
@@ -431,7 +431,8 @@ no_iso:
        ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, fs_source_sink_descs,
-                       hs_source_sink_descs, ss_source_sink_descs, NULL);
+                       hs_source_sink_descs, ss_source_sink_descs,
+                       ss_source_sink_descs);
        if (ret)
                return ret;
 
index 4d94525..51c1cae 100644 (file)
@@ -358,7 +358,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
                fs_subset_out_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function,
-                       ss_eth_function, NULL);
+                       ss_eth_function, ss_eth_function);
        if (status)
                goto fail;
 
index 7acb507..de161ee 100644 (file)
@@ -2057,7 +2057,8 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
        uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, uasp_fs_function_desc,
-                       uasp_hs_function_desc, uasp_ss_function_desc, NULL);
+                       uasp_hs_function_desc, uasp_ss_function_desc,
+                       uasp_ss_function_desc);
        if (ret)
                goto ep_fail;
 
index 7bc18cf..18c2bbd 100644 (file)
@@ -59,6 +59,7 @@
 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI           0x1138
 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI            0x461e
 
+#define PCI_DEVICE_ID_AMD_RENOIR_XHCI                  0x1639
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3                        0x43ba
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_2                        0x43bb
@@ -182,6 +183,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
                xhci->quirks |= XHCI_U2_DISABLE_WAKE;
 
+       if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+               pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
+               xhci->quirks |= XHCI_BROKEN_D3COLD;
+
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
                xhci->quirks |= XHCI_INTEL_HOST;
@@ -539,7 +544,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
         * Systems with the TI redriver that loses port status change events
         * need to have the registers polled during D3, so avoid D3cold.
         */
-       if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+       if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
                pci_d3cold_disable(pdev);
 
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
index 2595a8f..e417f5c 100644 (file)
@@ -1892,6 +1892,7 @@ struct xhci_hcd {
 #define XHCI_DISABLE_SPARSE    BIT_ULL(38)
 #define XHCI_SG_TRB_CACHE_SIZE_QUIRK   BIT_ULL(39)
 #define XHCI_NO_SOFT_RETRY     BIT_ULL(40)
+#define XHCI_BROKEN_D3COLD     BIT_ULL(41)
 
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
index b3cfe86..3366530 100644 (file)
@@ -263,6 +263,8 @@ static int __init brcmstb_usb_pinmap_probe(struct platform_device *pdev)
                return -EINVAL;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!r)
+               return -EINVAL;
 
        pdata = devm_kzalloc(&pdev->dev,
                             sizeof(*pdata) +
index 8f09a38..4c8f011 100644 (file)
@@ -2009,9 +2009,8 @@ static void musb_pm_runtime_check_session(struct musb *musb)
                        schedule_delayed_work(&musb->irq_work,
                                              msecs_to_jiffies(1000));
                        musb->quirk_retries--;
-                       break;
                }
-               fallthrough;
+               break;
        case MUSB_QUIRK_B_INVALID_VBUS_91:
                if (musb->quirk_retries && !musb->flush_irq_work) {
                        musb_dbg(musb,
index ee595d1..fcb812b 100644 (file)
@@ -252,9 +252,11 @@ struct cp210x_serial_private {
        u8                      gpio_input;
 #endif
        u8                      partnum;
+       u32                     fw_version;
        speed_t                 min_speed;
        speed_t                 max_speed;
        bool                    use_actual_rate;
+       bool                    no_flow_control;
 };
 
 enum cp210x_event_state {
@@ -398,6 +400,7 @@ struct cp210x_special_chars {
 
 /* CP210X_VENDOR_SPECIFIC values */
 #define CP210X_READ_2NCONFIG   0x000E
+#define CP210X_GET_FW_VER_2N   0x0010
 #define CP210X_READ_LATCH      0x00C2
 #define CP210X_GET_PARTNUM     0x370B
 #define CP210X_GET_PORTCONFIG  0x370C
@@ -537,6 +540,12 @@ struct cp210x_single_port_config {
 #define CP210X_2NCONFIG_GPIO_RSTLATCH_IDX      587
 #define CP210X_2NCONFIG_GPIO_CONTROL_IDX       600
 
+/* CP2102N QFN20 port configuration values */
+#define CP2102N_QFN20_GPIO2_TXLED_MODE         BIT(2)
+#define CP2102N_QFN20_GPIO3_RXLED_MODE         BIT(3)
+#define CP2102N_QFN20_GPIO1_RS485_MODE         BIT(4)
+#define CP2102N_QFN20_GPIO0_CLK_MODE           BIT(6)
+
 /* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
 struct cp210x_gpio_write {
        u8      mask;
@@ -1122,6 +1131,7 @@ static bool cp210x_termios_change(const struct ktermios *a, const struct ktermio
 static void cp210x_set_flow_control(struct tty_struct *tty,
                struct usb_serial_port *port, struct ktermios *old_termios)
 {
+       struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
        struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
        struct cp210x_special_chars chars;
        struct cp210x_flow_ctl flow_ctl;
@@ -1129,6 +1139,15 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
        u32 ctl_hs;
        int ret;
 
+       /*
+        * Some CP2102N devices interpret ulXonLimit as ulFlowReplace (erratum
+        * CP2102N_E104). Report back that flow control is not supported.
+        */
+       if (priv->no_flow_control) {
+               tty->termios.c_cflag &= ~CRTSCTS;
+               tty->termios.c_iflag &= ~(IXON | IXOFF);
+       }
+
        if (old_termios &&
                        C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) &&
                        I_IXON(tty) == (old_termios->c_iflag & IXON) &&
@@ -1185,19 +1204,20 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
                port_priv->crtscts = false;
        }
 
-       if (I_IXOFF(tty))
+       if (I_IXOFF(tty)) {
                flow_repl |= CP210X_SERIAL_AUTO_RECEIVE;
-       else
+
+               flow_ctl.ulXonLimit = cpu_to_le32(128);
+               flow_ctl.ulXoffLimit = cpu_to_le32(128);
+       } else {
                flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE;
+       }
 
        if (I_IXON(tty))
                flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT;
        else
                flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT;
 
-       flow_ctl.ulXonLimit = cpu_to_le32(128);
-       flow_ctl.ulXoffLimit = cpu_to_le32(128);
-
        dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__,
                        ctl_hs, flow_repl);
 
@@ -1733,7 +1753,19 @@ static int cp2102n_gpioconf_init(struct usb_serial *serial)
        priv->gpio_pushpull = (gpio_pushpull >> 3) & 0x0f;
 
        /* 0 indicates GPIO mode, 1 is alternate function */
-       priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+       if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN20) {
+               /* QFN20 is special... */
+               if (gpio_ctrl & CP2102N_QFN20_GPIO0_CLK_MODE)   /* GPIO 0 */
+                       priv->gpio_altfunc |= BIT(0);
+               if (gpio_ctrl & CP2102N_QFN20_GPIO1_RS485_MODE) /* GPIO 1 */
+                       priv->gpio_altfunc |= BIT(1);
+               if (gpio_ctrl & CP2102N_QFN20_GPIO2_TXLED_MODE) /* GPIO 2 */
+                       priv->gpio_altfunc |= BIT(2);
+               if (gpio_ctrl & CP2102N_QFN20_GPIO3_RXLED_MODE) /* GPIO 3 */
+                       priv->gpio_altfunc |= BIT(3);
+       } else {
+               priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+       }
 
        if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN28) {
                /*
@@ -1908,6 +1940,45 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
        priv->use_actual_rate = use_actual_rate;
 }
 
+static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
+{
+       struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+       u8 ver[3];
+       int ret;
+
+       ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, value,
+                       ver, sizeof(ver));
+       if (ret)
+               return ret;
+
+       dev_dbg(&serial->interface->dev, "%s - %d.%d.%d\n", __func__,
+                       ver[0], ver[1], ver[2]);
+
+       priv->fw_version = ver[0] << 16 | ver[1] << 8 | ver[2];
+
+       return 0;
+}
+
+static void cp210x_determine_quirks(struct usb_serial *serial)
+{
+       struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+       int ret;
+
+       switch (priv->partnum) {
+       case CP210X_PARTNUM_CP2102N_QFN28:
+       case CP210X_PARTNUM_CP2102N_QFN24:
+       case CP210X_PARTNUM_CP2102N_QFN20:
+               ret = cp210x_get_fw_version(serial, CP210X_GET_FW_VER_2N);
+               if (ret)
+                       break;
+               if (priv->fw_version <= 0x10004)
+                       priv->no_flow_control = true;
+               break;
+       default:
+               break;
+       }
+}
+
 static int cp210x_attach(struct usb_serial *serial)
 {
        int result;
@@ -1928,6 +1999,7 @@ static int cp210x_attach(struct usb_serial *serial)
 
        usb_set_serial_data(serial, priv);
 
+       cp210x_determine_quirks(serial);
        cp210x_init_max_speed(serial);
 
        result = cp210x_gpio_init(serial);
index 369ef14..4a1f3a9 100644 (file)
@@ -611,6 +611,7 @@ static const struct usb_device_id id_table_combined[] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
index d854e04..add602b 100644 (file)
 #define FTDI_NT_ORIONLXM_PID           0x7c90  /* OrionLXm Substation Automation Platform */
 #define FTDI_NT_ORIONLX_PLUS_PID       0x7c91  /* OrionLX+ Substation Automation Platform */
 #define FTDI_NT_ORION_IO_PID           0x7c92  /* Orion I/O */
+#define FTDI_NT_ORIONMX_PID            0x7c93  /* OrionMX */
 
 /*
  * Synapse Wireless product ids (FTDI_VID)
index 83c62f9..41f1b87 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * USB ZyXEL omni.net LCD PLUS driver
+ * USB ZyXEL omni.net driver
  *
  * Copyright (C) 2013,2017 Johan Hovold <johan@kernel.org>
  *
 #include <linux/usb/serial.h>
 
 #define DRIVER_AUTHOR "Alessandro Zummo"
-#define DRIVER_DESC "USB ZyXEL omni.net LCD PLUS Driver"
+#define DRIVER_DESC "USB ZyXEL omni.net Driver"
 
 #define ZYXEL_VENDOR_ID                0x0586
 #define ZYXEL_OMNINET_ID       0x1000
+#define ZYXEL_OMNI_56K_PLUS_ID 0x1500
 /* This one seems to be a re-branded ZyXEL device */
 #define BT_IGNITIONPRO_ID      0x2000
 
@@ -40,6 +41,7 @@ static void omninet_port_remove(struct usb_serial_port *port);
 
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
+       { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNI_56K_PLUS_ID) },
        { USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
        { }                                             /* Terminating entry */
 };
@@ -50,7 +52,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
                .owner =        THIS_MODULE,
                .name =         "omninet",
        },
-       .description =          "ZyXEL - omni.net lcd plus usb",
+       .description =          "ZyXEL - omni.net usb",
        .id_table =             id_table,
        .num_bulk_out =         2,
        .calc_num_ports =       omninet_calc_num_ports,
index 5f2e7f6..067690d 100644 (file)
@@ -416,7 +416,7 @@ static void qt2_close(struct usb_serial_port *port)
 
        /* flush the port transmit buffer */
        i = usb_control_msg(serial->dev,
-                           usb_rcvctrlpipe(serial->dev, 0),
+                           usb_sndctrlpipe(serial->dev, 0),
                            QT2_FLUSH_DEVICE, 0x40, 1,
                            port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
 
@@ -426,7 +426,7 @@ static void qt2_close(struct usb_serial_port *port)
 
        /* flush the port receive buffer */
        i = usb_control_msg(serial->dev,
-                           usb_rcvctrlpipe(serial->dev, 0),
+                           usb_sndctrlpipe(serial->dev, 0),
                            QT2_FLUSH_DEVICE, 0x40, 0,
                            port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
 
@@ -639,7 +639,7 @@ static int qt2_attach(struct usb_serial *serial)
        int status;
 
        /* power on unit */
-       status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+       status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
                                 0xc2, 0x40, 0x8000, 0, NULL, 0,
                                 QT2_USB_TIMEOUT);
        if (status < 0) {
index 8514bec..77dabd3 100644 (file)
@@ -239,7 +239,7 @@ find_mux:
        dev = class_find_device(&typec_mux_class, NULL, fwnode,
                                mux_fwnode_match);
 
-       return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+       return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
 }
 
 /**
index 46a25b8..ffa8aa1 100644 (file)
@@ -582,10 +582,15 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
        acpi_dev_free_resource_list(&resource_list);
 
        if (!pmc->iom_base) {
-               put_device(&adev->dev);
+               acpi_dev_put(adev);
                return -ENOMEM;
        }
 
+       if (IS_ERR(pmc->iom_base)) {
+               acpi_dev_put(adev);
+               return PTR_ERR(pmc->iom_base);
+       }
+
        pmc->iom_adev = adev;
 
        return 0;
@@ -636,8 +641,10 @@ static int pmc_usb_probe(struct platform_device *pdev)
                        break;
 
                ret = pmc_usb_register_port(pmc, i, fwnode);
-               if (ret)
+               if (ret) {
+                       fwnode_handle_put(fwnode);
                        goto err_remove_ports;
+               }
        }
 
        platform_set_drvdata(pdev, pmc);
@@ -651,7 +658,7 @@ err_remove_ports:
                usb_role_switch_unregister(pmc->port[i].usb_sw);
        }
 
-       put_device(&pmc->iom_adev->dev);
+       acpi_dev_put(pmc->iom_adev);
 
        return ret;
 }
@@ -667,7 +674,7 @@ static int pmc_usb_remove(struct platform_device *pdev)
                usb_role_switch_unregister(pmc->port[i].usb_sw);
        }
 
-       put_device(&pmc->iom_adev->dev);
+       acpi_dev_put(pmc->iom_adev);
 
        return 0;
 }
index 9ce8c9a..63470cf 100644 (file)
@@ -401,6 +401,8 @@ struct tcpm_port {
        unsigned int nr_src_pdo;
        u32 snk_pdo[PDO_MAX_OBJECTS];
        unsigned int nr_snk_pdo;
+       u32 snk_vdo_v1[VDO_MAX_OBJECTS];
+       unsigned int nr_snk_vdo_v1;
        u32 snk_vdo[VDO_MAX_OBJECTS];
        unsigned int nr_snk_vdo;
 
@@ -1547,42 +1549,45 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
                        if (PD_VDO_VID(p[0]) != USB_SID_PD)
                                break;
 
-                       if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
+                       if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
                                typec_partner_set_svdm_version(port->partner,
                                                               PD_VDO_SVDM_VER(p[0]));
+                               svdm_version = PD_VDO_SVDM_VER(p[0]);
+                       }
 
-                       tcpm_ams_start(port, DISCOVER_IDENTITY);
-                       /* 6.4.4.3.1: Only respond as UFP (device) */
-                       if (port->data_role == TYPEC_DEVICE &&
+                       port->ams = DISCOVER_IDENTITY;
+                       /*
+                        * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
+                        * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
+                        * "wrong configuration" or "Unrecognized"
+                        */
+                       if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
                            port->nr_snk_vdo) {
-                               /*
-                                * Product Type DFP and Connector Type are not defined in SVDM
-                                * version 1.0 and shall be set to zero.
-                                */
-                               if (typec_get_negotiated_svdm_version(typec) < SVDM_VER_2_0)
-                                       response[1] = port->snk_vdo[0] & ~IDH_DFP_MASK
-                                                     & ~IDH_CONN_MASK;
-                               else
-                                       response[1] = port->snk_vdo[0];
-                               for (i = 1; i <  port->nr_snk_vdo; i++)
-                                       response[i + 1] = port->snk_vdo[i];
-                               rlen = port->nr_snk_vdo + 1;
+                               if (svdm_version < SVDM_VER_2_0) {
+                                       for (i = 0; i < port->nr_snk_vdo_v1; i++)
+                                               response[i + 1] = port->snk_vdo_v1[i];
+                                       rlen = port->nr_snk_vdo_v1 + 1;
+
+                               } else {
+                                       for (i = 0; i < port->nr_snk_vdo; i++)
+                                               response[i + 1] = port->snk_vdo[i];
+                                       rlen = port->nr_snk_vdo + 1;
+                               }
                        }
                        break;
                case CMD_DISCOVER_SVID:
-                       tcpm_ams_start(port, DISCOVER_SVIDS);
+                       port->ams = DISCOVER_SVIDS;
                        break;
                case CMD_DISCOVER_MODES:
-                       tcpm_ams_start(port, DISCOVER_MODES);
+                       port->ams = DISCOVER_MODES;
                        break;
                case CMD_ENTER_MODE:
-                       tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
+                       port->ams = DFP_TO_UFP_ENTER_MODE;
                        break;
                case CMD_EXIT_MODE:
-                       tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
+                       port->ams = DFP_TO_UFP_EXIT_MODE;
                        break;
                case CMD_ATTENTION:
-                       tcpm_ams_start(port, ATTENTION);
                        /* Attention command does not have response */
                        *adev_action = ADEV_ATTENTION;
                        return 0;
@@ -1937,6 +1942,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                        tcpm_log(port, "VDM Tx error, retry");
                        port->vdm_retries++;
                        port->vdm_state = VDM_STATE_READY;
+                       if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
+                               tcpm_ams_finish(port);
+               } else {
                        tcpm_ams_finish(port);
                }
                break;
@@ -2183,20 +2191,25 @@ static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
 
        if (!type) {
                tcpm_log(port, "Alert message received with no type");
+               tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
                return;
        }
 
        /* Just handling non-battery alerts for now */
        if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
-               switch (port->state) {
-               case SRC_READY:
-               case SNK_READY:
+               if (port->pwr_role == TYPEC_SOURCE) {
+                       port->upcoming_state = GET_STATUS_SEND;
+                       tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
+               } else {
+                       /*
+                        * Do not check SinkTxOk here in case the Source doesn't set its Rp to
+                        * SinkTxOk in time.
+                        */
+                       port->ams = GETTING_SOURCE_SINK_STATUS;
                        tcpm_set_state(port, GET_STATUS_SEND, 0);
-                       break;
-               default:
-                       tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
-                       break;
                }
+       } else {
+               tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
        }
 }
 
@@ -2440,7 +2453,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
                tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
                break;
        case PD_DATA_ALERT:
-               tcpm_handle_alert(port, msg->payload, cnt);
+               if (port->state != SRC_READY && port->state != SNK_READY)
+                       tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+                                            SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+                                            NONE_AMS, 0);
+               else
+                       tcpm_handle_alert(port, msg->payload, cnt);
                break;
        case PD_DATA_BATT_STATUS:
        case PD_DATA_GET_COUNTRY_INFO:
@@ -2764,24 +2782,16 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
 
        switch (type) {
        case PD_EXT_STATUS:
-               /*
-                * If PPS related events raised then get PPS status to clear
-                * (see USB PD 3.0 Spec, 6.5.2.4)
-                */
-               if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
-                   USB_PD_EXT_SDB_PPS_EVENTS)
-                       tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND,
-                                            GETTING_SOURCE_SINK_STATUS, 0);
-
-               else
-                       tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
-               break;
        case PD_EXT_PPS_STATUS:
-               /*
-                * For now the PPS status message is used to clear events
-                * and nothing more.
-                */
-               tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+               if (port->ams == GETTING_SOURCE_SINK_STATUS) {
+                       tcpm_ams_finish(port);
+                       tcpm_set_state(port, ready_state(port), 0);
+               } else {
+                       /* unexpected Status or PPS_Status Message */
+                       tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+                                            SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+                                            NONE_AMS, 0);
+               }
                break;
        case PD_EXT_SOURCE_CAP_EXT:
        case PD_EXT_GET_BATT_CAP:
@@ -5947,6 +5957,22 @@ sink:
                        return ret;
        }
 
+       /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
+       if (port->nr_snk_vdo) {
+               ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
+               if (ret < 0)
+                       return ret;
+               else if (ret == 0)
+                       return -ENODATA;
+
+               port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
+               ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
+                                                    port->snk_vdo_v1,
+                                                    port->nr_snk_vdo_v1);
+               if (ret < 0)
+                       return ret;
+       }
+
        return 0;
 }
 
@@ -6312,6 +6338,11 @@ void tcpm_unregister_port(struct tcpm_port *port)
 {
        int i;
 
+       hrtimer_cancel(&port->send_discover_timer);
+       hrtimer_cancel(&port->enable_frs_timer);
+       hrtimer_cancel(&port->vdm_state_machine_timer);
+       hrtimer_cancel(&port->state_machine_timer);
+
        tcpm_reset_port(port);
        for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
                typec_unregister_altmode(port->port_altmode[i]);
index 79ae639..5d12533 100644 (file)
@@ -378,7 +378,7 @@ static int wcove_pd_transmit(struct tcpc_dev *tcpc,
                const u8 *data = (void *)msg;
                int i;
 
-               for (i = 0; i < pd_header_cnt(msg->header) * 4 + 2; i++) {
+               for (i = 0; i < pd_header_cnt_le(msg->header) * 4 + 2; i++) {
                        ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
                                           data[i]);
                        if (ret)
index b433169..b7d104c 100644 (file)
@@ -1253,6 +1253,7 @@ err_unregister:
        }
 
 err_reset:
+       memset(&ucsi->cap, 0, sizeof(ucsi->cap));
        ucsi_reset_ppm(ucsi);
 err:
        return ret;
index 53ce78d..5e2e1b9 100644 (file)
@@ -2,6 +2,7 @@
 config VFIO_PCI
        tristate "VFIO support for PCI devices"
        depends on VFIO && PCI && EVENTFD
+       depends on MMU
        select VFIO_VIRQFD
        select IRQ_BYPASS_MANAGER
        help
index d57f037..70e28ef 100644 (file)
@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
                        if (len == 0xFF) {
                                len = vfio_ext_cap_len(vdev, ecap, epos);
                                if (len < 0)
-                                       return ret;
+                                       return len;
                        }
                }
 
index 361e5b5..470fcf7 100644 (file)
@@ -291,7 +291,7 @@ err_irq:
        vfio_platform_regions_cleanup(vdev);
 err_reg:
        mutex_unlock(&driver_lock);
-       module_put(THIS_MODULE);
+       module_put(vdev->parent_module);
        return ret;
 }
 
index a0747c3..a3e925a 100644 (file)
@@ -2795,7 +2795,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
                return 0;
        }
 
-       size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
+       size = struct_size(cap_iovas, iova_ranges, iovas);
 
        cap_iovas = kzalloc(size, GFP_KERNEL);
        if (!cap_iovas)
index b292887..a591d29 100644 (file)
@@ -52,6 +52,13 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        get_page(page);
+
+       if (vmf->vma->vm_file)
+               page->mapping = vmf->vma->vm_file->f_mapping;
+       else
+               printk(KERN_ERR "no mapping available\n");
+
+       BUG_ON(!page->mapping);
        page->index = vmf->pgoff;
 
        vmf->page = page;
@@ -144,6 +151,17 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
        .page_mkwrite   = fb_deferred_io_mkwrite,
 };
 
+static int fb_deferred_io_set_page_dirty(struct page *page)
+{
+       if (!PageDirty(page))
+               SetPageDirty(page);
+       return 0;
+}
+
+static const struct address_space_operations fb_deferred_io_aops = {
+       .set_page_dirty = fb_deferred_io_set_page_dirty,
+};
+
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
        vma->vm_ops = &fb_deferred_io_vm_ops;
@@ -194,12 +212,29 @@ void fb_deferred_io_init(struct fb_info *info)
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_init);
 
+void fb_deferred_io_open(struct fb_info *info,
+                        struct inode *inode,
+                        struct file *file)
+{
+       file->f_mapping->a_ops = &fb_deferred_io_aops;
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
        struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct page *page;
+       int i;
 
        BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);
+
+       /* clear out the mapping that we setup */
+       for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+               page = fb_deferred_io_page(info, i);
+               page->mapping = NULL;
+       }
+
        mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
index 072780b..98f1930 100644 (file)
@@ -1415,6 +1415,10 @@ __releases(&info->lock)
                if (res)
                        module_put(info->fbops->owner);
        }
+#ifdef CONFIG_FB_DEFERRED_IO
+       if (info->fbdefio)
+               fb_deferred_io_open(info, inode, file);
+#endif
 out:
        unlock_fb_info(info);
        if (res)
index b297525..179004b 100644 (file)
@@ -203,8 +203,8 @@ static int __init afs_init(void)
                goto error_fs;
 
        afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs");
-       if (IS_ERR(afs_proc_symlink)) {
-               ret = PTR_ERR(afs_proc_symlink);
+       if (!afs_proc_symlink) {
+               ret = -ENOMEM;
                goto error_proc;
        }
 
index 3edb620..e9ccaa3 100644 (file)
@@ -730,7 +730,7 @@ static int afs_writepages_region(struct address_space *mapping,
                        return ret;
                }
 
-               start += ret * PAGE_SIZE;
+               start += ret;
 
                cond_resched();
        } while (wbc->nr_to_write > 0);
@@ -837,6 +837,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        unsigned long priv;
+       vm_fault_t ret = VM_FAULT_RETRY;
 
        _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
 
@@ -848,14 +849,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
 #ifdef CONFIG_AFS_FSCACHE
        if (PageFsCache(page) &&
            wait_on_page_fscache_killable(page) < 0)
-               return VM_FAULT_RETRY;
+               goto out;
 #endif
 
        if (wait_on_page_writeback_killable(page))
-               return VM_FAULT_RETRY;
+               goto out;
 
        if (lock_page_killable(page) < 0)
-               return VM_FAULT_RETRY;
+               goto out;
 
        /* We mustn't change page->private until writeback is complete as that
         * details the portion of the page we need to write back and we might
@@ -863,7 +864,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
         */
        if (wait_on_page_writeback_killable(page) < 0) {
                unlock_page(page);
-               return VM_FAULT_RETRY;
+               goto out;
        }
 
        priv = afs_page_dirty(page, 0, thp_size(page));
@@ -877,8 +878,10 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
        }
        file_update_time(file);
 
+       ret = VM_FAULT_LOCKED;
+out:
        sb_end_pagefault(inode->i_sb);
-       return VM_FAULT_LOCKED;
+       return ret;
 }
 
 /*
index aa57bdc..6d5c4e4 100644 (file)
@@ -2442,16 +2442,16 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
        spin_lock(&sinfo->lock);
        spin_lock(&cache->lock);
        if (!--cache->ro) {
-               num_bytes = cache->length - cache->reserved -
-                           cache->pinned - cache->bytes_super -
-                           cache->zone_unusable - cache->used;
-               sinfo->bytes_readonly -= num_bytes;
                if (btrfs_is_zoned(cache->fs_info)) {
                        /* Migrate zone_unusable bytes back */
                        cache->zone_unusable = cache->alloc_offset - cache->used;
                        sinfo->bytes_zone_unusable += cache->zone_unusable;
                        sinfo->bytes_readonly -= cache->zone_unusable;
                }
+               num_bytes = cache->length - cache->reserved -
+                           cache->pinned - cache->bytes_super -
+                           cache->zone_unusable - cache->used;
+               sinfo->bytes_readonly -= num_bytes;
                list_del_init(&cache->ro_list);
        }
        spin_unlock(&cache->lock);
index d17ac30..1346d69 100644 (file)
@@ -457,7 +457,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                int submit = 0;
-               int len;
+               int len = 0;
 
                page = compressed_pages[pg_index];
                page->mapping = inode->vfs_inode.i_mapping;
@@ -465,10 +465,17 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
                                                          0);
 
-               if (pg_index == 0 && use_append)
-                       len = bio_add_zone_append_page(bio, page, PAGE_SIZE, 0);
-               else
-                       len = bio_add_page(bio, page, PAGE_SIZE, 0);
+               /*
+                * Page can only be added to bio if the current bio fits in
+                * stripe.
+                */
+               if (!submit) {
+                       if (pg_index == 0 && use_append)
+                               len = bio_add_zone_append_page(bio, page,
+                                                              PAGE_SIZE, 0);
+                       else
+                               len = bio_add_page(bio, page, PAGE_SIZE, 0);
+               }
 
                page->mapping = NULL;
                if (submit || len < PAGE_SIZE) {
index c9a3036..8d386a5 100644 (file)
@@ -2648,6 +2648,24 @@ static int validate_super(struct btrfs_fs_info *fs_info,
                ret = -EINVAL;
        }
 
+       if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
+                  BTRFS_FSID_SIZE)) {
+               btrfs_err(fs_info,
+               "superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
+                       fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
+               ret = -EINVAL;
+       }
+
+       if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
+           memcmp(fs_info->fs_devices->metadata_uuid,
+                  fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
+               btrfs_err(fs_info,
+"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
+                       fs_info->super_copy->metadata_uuid,
+                       fs_info->fs_devices->metadata_uuid);
+               ret = -EINVAL;
+       }
+
        if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
                   BTRFS_FSID_SIZE) != 0) {
                btrfs_err(fs_info,
@@ -3279,14 +3297,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 
        disk_super = fs_info->super_copy;
 
-       ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
-                      BTRFS_FSID_SIZE));
-
-       if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
-               ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
-                               fs_info->super_copy->metadata_uuid,
-                               BTRFS_FSID_SIZE));
-       }
 
        features = btrfs_super_flags(disk_super);
        if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
index f1d15b6..3d5c35e 100644 (file)
@@ -1868,7 +1868,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
        trace_run_delayed_ref_head(fs_info, head, 0);
        btrfs_delayed_ref_unlock(head);
        btrfs_put_delayed_ref_head(head);
-       return 0;
+       return ret;
 }
 
 static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
index 294602f..441cee7 100644 (file)
@@ -788,7 +788,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
        u64 end_byte = bytenr + len;
        u64 csum_end;
        struct extent_buffer *leaf;
-       int ret;
+       int ret = 0;
        const u32 csum_size = fs_info->csum_size;
        u32 blocksize_bits = fs_info->sectorsize_bits;
 
@@ -806,6 +806,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0) {
+                       ret = 0;
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
@@ -862,7 +863,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                        ret = btrfs_del_items(trans, root, path,
                                              path->slots[0], del_nr);
                        if (ret)
-                               goto out;
+                               break;
                        if (key.offset == bytenr)
                                break;
                } else if (key.offset < bytenr && csum_end > end_byte) {
@@ -906,8 +907,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                        ret = btrfs_split_item(trans, root, path, &key, offset);
                        if (ret && ret != -EAGAIN) {
                                btrfs_abort_transaction(trans, ret);
-                               goto out;
+                               break;
                        }
+                       ret = 0;
 
                        key.offset = end_byte - 1;
                } else {
@@ -917,12 +919,41 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                }
                btrfs_release_path(path);
        }
-       ret = 0;
-out:
        btrfs_free_path(path);
        return ret;
 }
 
+static int find_next_csum_offset(struct btrfs_root *root,
+                                struct btrfs_path *path,
+                                u64 *next_offset)
+{
+       const u32 nritems = btrfs_header_nritems(path->nodes[0]);
+       struct btrfs_key found_key;
+       int slot = path->slots[0] + 1;
+       int ret;
+
+       if (nritems == 0 || slot >= nritems) {
+               ret = btrfs_next_leaf(root, path);
+               if (ret < 0) {
+                       return ret;
+               } else if (ret > 0) {
+                       *next_offset = (u64)-1;
+                       return 0;
+               }
+               slot = path->slots[0];
+       }
+
+       btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+
+       if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+           found_key.type != BTRFS_EXTENT_CSUM_KEY)
+               *next_offset = (u64)-1;
+       else
+               *next_offset = found_key.offset;
+
+       return 0;
+}
+
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums)
@@ -938,7 +969,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
        u64 total_bytes = 0;
        u64 csum_offset;
        u64 bytenr;
-       u32 nritems;
        u32 ins_size;
        int index = 0;
        int found_next;
@@ -981,26 +1011,10 @@ again:
                        goto insert;
                }
        } else {
-               int slot = path->slots[0] + 1;
-               /* we didn't find a csum item, insert one */
-               nritems = btrfs_header_nritems(path->nodes[0]);
-               if (!nritems || (path->slots[0] >= nritems - 1)) {
-                       ret = btrfs_next_leaf(root, path);
-                       if (ret < 0) {
-                               goto out;
-                       } else if (ret > 0) {
-                               found_next = 1;
-                               goto insert;
-                       }
-                       slot = path->slots[0];
-               }
-               btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
-               if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
-                   found_key.type != BTRFS_EXTENT_CSUM_KEY) {
-                       found_next = 1;
-                       goto insert;
-               }
-               next_offset = found_key.offset;
+               /* We didn't find a csum item, insert one. */
+               ret = find_next_csum_offset(root, path, &next_offset);
+               if (ret < 0)
+                       goto out;
                found_next = 1;
                goto insert;
        }
@@ -1056,8 +1070,48 @@ extend_csum:
                tmp = sums->len - total_bytes;
                tmp >>= fs_info->sectorsize_bits;
                WARN_ON(tmp < 1);
+               extend_nr = max_t(int, 1, tmp);
+
+               /*
+                * A log tree can already have checksum items with a subset of
+                * the checksums we are trying to log. This can happen after
+                * doing a sequence of partial writes into prealloc extents and
+                * fsyncs in between, with a full fsync logging a larger subrange
+                * of an extent for which a previous fast fsync logged a smaller
+                * subrange. And this happens in particular due to merging file
+                * extent items when we complete an ordered extent for a range
+                * covered by a prealloc extent - this is done at
+                * btrfs_mark_extent_written().
+                *
+                * So if we try to extend the previous checksum item, which has
+                * a range that ends at the start of the range we want to insert,
+                * make sure we don't extend beyond the start offset of the next
+                * checksum item. If we are at the last item in the leaf, then
+                * forget the optimization of extending and add a new checksum
+                * item - it is not worth the complexity of releasing the path,
+                * getting the first key for the next leaf, repeat the btree
+                * search, etc, because log trees are temporary anyway and it
+                * would only save a few bytes of leaf space.
+                */
+               if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+                       if (path->slots[0] + 1 >=
+                           btrfs_header_nritems(path->nodes[0])) {
+                               ret = find_next_csum_offset(root, path, &next_offset);
+                               if (ret < 0)
+                                       goto out;
+                               found_next = 1;
+                               goto insert;
+                       }
+
+                       ret = find_next_csum_offset(root, path, &next_offset);
+                       if (ret < 0)
+                               goto out;
+
+                       tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
+                       if (tmp <= INT_MAX)
+                               extend_nr = min_t(int, extend_nr, tmp);
+               }
 
-               extend_nr = max_t(int, 1, (int)tmp);
                diff = (csum_offset + extend_nr) * csum_size;
                diff = min(diff,
                           MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
index 3b10d98..55f6842 100644 (file)
@@ -1094,7 +1094,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
        int del_nr = 0;
        int del_slot = 0;
        int recow;
-       int ret;
+       int ret = 0;
        u64 ino = btrfs_ino(inode);
 
        path = btrfs_alloc_path();
@@ -1315,7 +1315,7 @@ again:
        }
 out:
        btrfs_free_path(path);
-       return 0;
+       return ret;
 }
 
 /*
index 33f1457..46f3929 100644 (file)
@@ -3000,6 +3000,18 @@ out:
        if (ret || truncated) {
                u64 unwritten_start = start;
 
+               /*
+                * If we failed to finish this ordered extent for any reason we
+                * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
+                * extent, and mark the inode with the error if it wasn't
+                * already set.  Any error during writeback would have already
+                * set the mapping error, so we need to set it if we're the ones
+                * marking this ordered extent as failed.
+                */
+               if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
+                                            &ordered_extent->flags))
+                       mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
+
                if (truncated)
                        unwritten_start += logical_len;
                clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
@@ -9076,6 +9088,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
+       bool need_abort = false;
 
        /* we only allow rename subvolume link between subvolumes */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
@@ -9135,6 +9148,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                                             old_idx);
                if (ret)
                        goto out_fail;
+               need_abort = true;
        }
 
        /* And now for the dest. */
@@ -9150,8 +9164,11 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                                             new_ino,
                                             btrfs_ino(BTRFS_I(old_dir)),
                                             new_idx);
-               if (ret)
+               if (ret) {
+                       if (need_abort)
+                               btrfs_abort_transaction(trans, ret);
                        goto out_fail;
+               }
        }
 
        /* Update inode version and ctime/mtime. */
index d434dc7..9178da0 100644 (file)
@@ -203,10 +203,7 @@ static int clone_copy_inline_extent(struct inode *dst,
                         * inline extent's data to the page.
                         */
                        ASSERT(key.offset > 0);
-                       ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                                 inline_data, size, datal,
-                                                 comp_type);
-                       goto out;
+                       goto copy_to_page;
                }
        } else if (i_size_read(dst) <= datal) {
                struct btrfs_file_extent_item *ei;
@@ -222,13 +219,10 @@ static int clone_copy_inline_extent(struct inode *dst,
                    BTRFS_FILE_EXTENT_INLINE)
                        goto copy_inline_extent;
 
-               ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                         inline_data, size, datal, comp_type);
-               goto out;
+               goto copy_to_page;
        }
 
 copy_inline_extent:
-       ret = 0;
        /*
         * We have no extent items, or we have an extent at offset 0 which may
         * or may not be inlined. All these cases are dealt the same way.
@@ -240,11 +234,13 @@ copy_inline_extent:
                 * clone. Deal with all these cases by copying the inline extent
                 * data into the respective page at the destination inode.
                 */
-               ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                         inline_data, size, datal, comp_type);
-               goto out;
+               goto copy_to_page;
        }
 
+       /*
+        * Release path before starting a new transaction so we don't hold locks
+        * that would confuse lockdep.
+        */
        btrfs_release_path(path);
        /*
         * If we end up here it means were copy the inline extent into a leaf
@@ -281,11 +277,6 @@ copy_inline_extent:
        ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
 out:
        if (!ret && !trans) {
-               /*
-                * Release path before starting a new transaction so we don't
-                * hold locks that would confuse lockdep.
-                */
-               btrfs_release_path(path);
                /*
                 * No transaction here means we copied the inline extent into a
                 * page of the destination inode.
@@ -306,6 +297,21 @@ out:
                *trans_out = trans;
 
        return ret;
+
+copy_to_page:
+       /*
+        * Release our path because we don't need it anymore and also because
+        * copy_inline_to_page() needs to reserve data and metadata, which may
+        * need to flush delalloc when we are low on available space and
+        * therefore cause a deadlock if writeback of an inline extent needs to
+        * write to the same leaf or an ordered extent completion needs to write
+        * to the same leaf.
+        */
+       btrfs_release_path(path);
+
+       ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+                                 inline_data, size, datal, comp_type);
+       goto out;
 }
 
 /**
index 326be57..dbcf8bb 100644 (file)
@@ -1574,7 +1574,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                        if (ret)
                                goto out;
 
-                       btrfs_update_inode(trans, root, BTRFS_I(inode));
+                       ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+                       if (ret)
+                               goto out;
                }
 
                ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
@@ -1749,7 +1751,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 
        if (nlink != inode->i_nlink) {
                set_nlink(inode, nlink);
-               btrfs_update_inode(trans, root, BTRFS_I(inode));
+               ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+               if (ret)
+                       goto out;
        }
        BTRFS_I(inode)->index_cnt = (u64)-1;
 
@@ -1787,6 +1791,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                        break;
 
                if (ret == 1) {
+                       ret = 0;
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
@@ -1799,17 +1804,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 
                ret = btrfs_del_item(trans, root, path);
                if (ret)
-                       goto out;
+                       break;
 
                btrfs_release_path(path);
                inode = read_one_inode(root, key.offset);
-               if (!inode)
-                       return -EIO;
+               if (!inode) {
+                       ret = -EIO;
+                       break;
+               }
 
                ret = fixup_inode_link_count(trans, root, inode);
                iput(inode);
                if (ret)
-                       goto out;
+                       break;
 
                /*
                 * fixup on a directory may create new entries,
@@ -1818,8 +1825,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                 */
                key.offset = (u64)-1;
        }
-       ret = 0;
-out:
        btrfs_release_path(path);
        return ret;
 }
@@ -3297,6 +3302,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         *    begins and releases it only after writing its superblock.
         */
        mutex_lock(&fs_info->tree_log_mutex);
+
+       /*
+        * The previous transaction writeout phase could have failed, and thus
+        * marked the fs in an error state.  We must not commit here, as we
+        * could have updated our generation in the super_for_commit and
+        * writing the super here would result in transid mismatches.  If there
+        * is an error here just bail.
+        */
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+               ret = -EIO;
+               btrfs_set_log_full_commit(trans);
+               btrfs_abort_transaction(trans, ret);
+               mutex_unlock(&fs_info->tree_log_mutex);
+               goto out_wake_log_root;
+       }
+
        btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
        btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
        ret = write_all_supers(fs_info, 1);
index 1bb8ee9..f1f3b10 100644 (file)
@@ -150,6 +150,18 @@ static inline u32 sb_zone_number(int shift, int mirror)
        return (u32)zone;
 }
 
+static inline sector_t zone_start_sector(u32 zone_number,
+                                        struct block_device *bdev)
+{
+       return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
+}
+
+static inline u64 zone_start_physical(u32 zone_number,
+                                     struct btrfs_zoned_device_info *zone_info)
+{
+       return (u64)zone_number << zone_info->zone_size_shift;
+}
+
 /*
  * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
  * device into static sized chunks and fake a conventional zone on each of
@@ -405,8 +417,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
                if (sb_zone + 1 >= zone_info->nr_zones)
                        continue;
 
-               sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
-               ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
+               ret = btrfs_get_dev_zones(device,
+                                         zone_start_physical(sb_zone, zone_info),
                                          &zone_info->sb_zones[sb_pos],
                                          &nr_zones);
                if (ret)
@@ -721,7 +733,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
        if (sb_zone + 1 >= nr_zones)
                return -ENOENT;
 
-       ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
+       ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
                                  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
                                  zones);
        if (ret < 0)
@@ -826,7 +838,7 @@ int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
                return -ENOENT;
 
        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
-                               sb_zone << zone_sectors_shift,
+                               zone_start_sector(sb_zone, bdev),
                                zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
 }
 
@@ -878,7 +890,8 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
                        if (!(end <= sb_zone ||
                              sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
                                have_sb = true;
-                               pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
+                               pos = zone_start_physical(
+                                       sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
                                break;
                        }
 
index 2868e3e..c3d8fc1 100644 (file)
@@ -519,7 +519,7 @@ static bool dump_interrupted(void)
         * but then we need to teach dump_write() to restart and clear
         * TIF_SIGPENDING.
         */
-       return signal_pending(current);
+       return fatal_signal_pending(current) || freezing(current);
 }
 
 static void wait_for_dump_helpers(struct file *file)
index e813acf..ba7c01c 100644 (file)
@@ -893,7 +893,7 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
 
        copy[copy_len] = '\n';
 
-       ret = simple_read_from_buffer(user_buf, count, ppos, copy, copy_len);
+       ret = simple_read_from_buffer(user_buf, count, ppos, copy, len);
        kfree(copy);
 
        return ret;
index 77c84d6..cbf37b2 100644 (file)
@@ -3206,7 +3206,10 @@ static int ext4_split_extent_at(handle_t *handle,
                ext4_ext_mark_unwritten(ex2);
 
        err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
-       if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+       if (err != -ENOSPC && err != -EDQUOT)
+               goto out;
+
+       if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
                if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
                        if (split_flag & EXT4_EXT_DATA_VALID1) {
                                err = ext4_ext_zeroout(inode, ex2);
@@ -3232,25 +3235,22 @@ static int ext4_split_extent_at(handle_t *handle,
                                              ext4_ext_pblock(&orig_ex));
                }
 
-               if (err)
-                       goto fix_extent_len;
-               /* update the extent length and mark as initialized */
-               ex->ee_len = cpu_to_le16(ee_len);
-               ext4_ext_try_to_merge(handle, inode, path, ex);
-               err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-               if (err)
-                       goto fix_extent_len;
-
-               /* update extent status tree */
-               err = ext4_zeroout_es(inode, &zero_ex);
-
-               goto out;
-       } else if (err)
-               goto fix_extent_len;
-
-out:
-       ext4_ext_show_leaf(inode, path);
-       return err;
+               if (!err) {
+                       /* update the extent length and mark as initialized */
+                       ex->ee_len = cpu_to_le16(ee_len);
+                       ext4_ext_try_to_merge(handle, inode, path, ex);
+                       err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+                       if (!err)
+                               /* update extent status tree */
+                               err = ext4_zeroout_es(inode, &zero_ex);
+                       /* If we failed at this point, we don't know in which
+                        * state the extent tree exactly is so don't try to fix
+                        * length of the original extent as it may do even more
+                        * damage.
+                        */
+                       goto out;
+               }
+       }
 
 fix_extent_len:
        ex->ee_len = orig_ex.ee_len;
@@ -3260,6 +3260,9 @@ fix_extent_len:
         */
        ext4_ext_dirty(handle, inode, path + path->p_depth);
        return err;
+out:
+       ext4_ext_show_leaf(inode, path);
+       return err;
 }
 
 /*
index f98ca4f..e819522 100644 (file)
@@ -1288,28 +1288,29 @@ struct dentry_info_args {
 };
 
 static inline void tl_to_darg(struct dentry_info_args *darg,
-                               struct  ext4_fc_tl *tl)
+                             struct  ext4_fc_tl *tl, u8 *val)
 {
-       struct ext4_fc_dentry_info *fcd;
+       struct ext4_fc_dentry_info fcd;
 
-       fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl);
+       memcpy(&fcd, val, sizeof(fcd));
 
-       darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino);
-       darg->ino = le32_to_cpu(fcd->fc_ino);
-       darg->dname = fcd->fc_dname;
-       darg->dname_len = ext4_fc_tag_len(tl) -
-                       sizeof(struct ext4_fc_dentry_info);
+       darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
+       darg->ino = le32_to_cpu(fcd.fc_ino);
+       darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
+       darg->dname_len = le16_to_cpu(tl->fc_len) -
+               sizeof(struct ext4_fc_dentry_info);
 }
 
 /* Unlink replay function */
-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
+                                u8 *val)
 {
        struct inode *inode, *old_parent;
        struct qstr entry;
        struct dentry_info_args darg;
        int ret = 0;
 
-       tl_to_darg(&darg, tl);
+       tl_to_darg(&darg, tl, val);
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
                        darg.parent_ino, darg.dname_len);
@@ -1399,13 +1400,14 @@ out:
 }
 
 /* Link replay function */
-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
+                              u8 *val)
 {
        struct inode *inode;
        struct dentry_info_args darg;
        int ret = 0;
 
-       tl_to_darg(&darg, tl);
+       tl_to_darg(&darg, tl, val);
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
                        darg.parent_ino, darg.dname_len);
 
@@ -1450,9 +1452,10 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
 /*
  * Inode replay function
  */
-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
+                               u8 *val)
 {
-       struct ext4_fc_inode *fc_inode;
+       struct ext4_fc_inode fc_inode;
        struct ext4_inode *raw_inode;
        struct ext4_inode *raw_fc_inode;
        struct inode *inode = NULL;
@@ -1460,9 +1463,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
        int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag);
        struct ext4_extent_header *eh;
 
-       fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl);
+       memcpy(&fc_inode, val, sizeof(fc_inode));
 
-       ino = le32_to_cpu(fc_inode->fc_ino);
+       ino = le32_to_cpu(fc_inode.fc_ino);
        trace_ext4_fc_replay(sb, tag, ino, 0, 0);
 
        inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
@@ -1474,12 +1477,13 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
 
        ext4_fc_record_modified_inode(sb, ino);
 
-       raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode;
+       raw_fc_inode = (struct ext4_inode *)
+               (val + offsetof(struct ext4_fc_inode, fc_raw_inode));
        ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
        if (ret)
                goto out;
 
-       inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode);
+       inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode);
        raw_inode = ext4_raw_inode(&iloc);
 
        memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
@@ -1547,14 +1551,15 @@ out:
  * inode for which we are trying to create a dentry here, should already have
  * been replayed before we start here.
  */
-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
+                                u8 *val)
 {
        int ret = 0;
        struct inode *inode = NULL;
        struct inode *dir = NULL;
        struct dentry_info_args darg;
 
-       tl_to_darg(&darg, tl);
+       tl_to_darg(&darg, tl, val);
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
                        darg.parent_ino, darg.dname_len);
@@ -1633,9 +1638,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
 
 /* Replay add range tag */
 static int ext4_fc_replay_add_range(struct super_block *sb,
-                               struct ext4_fc_tl *tl)
+                                   struct ext4_fc_tl *tl, u8 *val)
 {
-       struct ext4_fc_add_range *fc_add_ex;
+       struct ext4_fc_add_range fc_add_ex;
        struct ext4_extent newex, *ex;
        struct inode *inode;
        ext4_lblk_t start, cur;
@@ -1645,15 +1650,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
        struct ext4_ext_path *path = NULL;
        int ret;
 
-       fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
-       ex = (struct ext4_extent *)&fc_add_ex->fc_ex;
+       memcpy(&fc_add_ex, val, sizeof(fc_add_ex));
+       ex = (struct ext4_extent *)&fc_add_ex.fc_ex;
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
-               le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block),
+               le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block),
                ext4_ext_get_actual_len(ex));
 
-       inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
-                               EXT4_IGET_NORMAL);
+       inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
        if (IS_ERR(inode)) {
                jbd_debug(1, "Inode not found.");
                return 0;
@@ -1762,32 +1766,33 @@ next:
 
 /* Replay DEL_RANGE tag */
 static int
-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
+ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
+                        u8 *val)
 {
        struct inode *inode;
-       struct ext4_fc_del_range *lrange;
+       struct ext4_fc_del_range lrange;
        struct ext4_map_blocks map;
        ext4_lblk_t cur, remaining;
        int ret;
 
-       lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl);
-       cur = le32_to_cpu(lrange->fc_lblk);
-       remaining = le32_to_cpu(lrange->fc_len);
+       memcpy(&lrange, val, sizeof(lrange));
+       cur = le32_to_cpu(lrange.fc_lblk);
+       remaining = le32_to_cpu(lrange.fc_len);
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
-               le32_to_cpu(lrange->fc_ino), cur, remaining);
+               le32_to_cpu(lrange.fc_ino), cur, remaining);
 
-       inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
+       inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
        if (IS_ERR(inode)) {
-               jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
+               jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino));
                return 0;
        }
 
        ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
 
        jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
-                       inode->i_ino, le32_to_cpu(lrange->fc_lblk),
-                       le32_to_cpu(lrange->fc_len));
+                       inode->i_ino, le32_to_cpu(lrange.fc_lblk),
+                       le32_to_cpu(lrange.fc_len));
        while (remaining > 0) {
                map.m_lblk = cur;
                map.m_len = remaining;
@@ -1808,8 +1813,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
        }
 
        ret = ext4_punch_hole(inode,
-               le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits,
-               le32_to_cpu(lrange->fc_len) <<  sb->s_blocksize_bits);
+               le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits,
+               le32_to_cpu(lrange.fc_len) <<  sb->s_blocksize_bits);
        if (ret)
                jbd_debug(1, "ext4_punch_hole returned %d", ret);
        ext4_ext_replay_shrink_inode(inode,
@@ -1925,11 +1930,11 @@ static int ext4_fc_replay_scan(journal_t *journal,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_fc_replay_state *state;
        int ret = JBD2_FC_REPLAY_CONTINUE;
-       struct ext4_fc_add_range *ext;
-       struct ext4_fc_tl *tl;
-       struct ext4_fc_tail *tail;
-       __u8 *start, *end;
-       struct ext4_fc_head *head;
+       struct ext4_fc_add_range ext;
+       struct ext4_fc_tl tl;
+       struct ext4_fc_tail tail;
+       __u8 *start, *end, *cur, *val;
+       struct ext4_fc_head head;
        struct ext4_extent *ex;
 
        state = &sbi->s_fc_replay_state;
@@ -1956,15 +1961,17 @@ static int ext4_fc_replay_scan(journal_t *journal,
        }
 
        state->fc_replay_expected_off++;
-       fc_for_each_tl(start, end, tl) {
+       for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
+               memcpy(&tl, cur, sizeof(tl));
+               val = cur + sizeof(tl);
                jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
-                         tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr);
-               switch (le16_to_cpu(tl->fc_tag)) {
+                         tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr);
+               switch (le16_to_cpu(tl.fc_tag)) {
                case EXT4_FC_TAG_ADD_RANGE:
-                       ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
-                       ex = (struct ext4_extent *)&ext->fc_ex;
+                       memcpy(&ext, val, sizeof(ext));
+                       ex = (struct ext4_extent *)&ext.fc_ex;
                        ret = ext4_fc_record_regions(sb,
-                               le32_to_cpu(ext->fc_ino),
+                               le32_to_cpu(ext.fc_ino),
                                le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
                                ext4_ext_get_actual_len(ex));
                        if (ret < 0)
@@ -1978,18 +1985,18 @@ static int ext4_fc_replay_scan(journal_t *journal,
                case EXT4_FC_TAG_INODE:
                case EXT4_FC_TAG_PAD:
                        state->fc_cur_tag++;
-                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
-                                       sizeof(*tl) + ext4_fc_tag_len(tl));
+                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+                                       sizeof(tl) + le16_to_cpu(tl.fc_len));
                        break;
                case EXT4_FC_TAG_TAIL:
                        state->fc_cur_tag++;
-                       tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
-                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
-                                               sizeof(*tl) +
+                       memcpy(&tail, val, sizeof(tail));
+                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+                                               sizeof(tl) +
                                                offsetof(struct ext4_fc_tail,
                                                fc_crc));
-                       if (le32_to_cpu(tail->fc_tid) == expected_tid &&
-                               le32_to_cpu(tail->fc_crc) == state->fc_crc) {
+                       if (le32_to_cpu(tail.fc_tid) == expected_tid &&
+                               le32_to_cpu(tail.fc_crc) == state->fc_crc) {
                                state->fc_replay_num_tags = state->fc_cur_tag;
                                state->fc_regions_valid =
                                        state->fc_regions_used;
@@ -2000,19 +2007,19 @@ static int ext4_fc_replay_scan(journal_t *journal,
                        state->fc_crc = 0;
                        break;
                case EXT4_FC_TAG_HEAD:
-                       head = (struct ext4_fc_head *)ext4_fc_tag_val(tl);
-                       if (le32_to_cpu(head->fc_features) &
+                       memcpy(&head, val, sizeof(head));
+                       if (le32_to_cpu(head.fc_features) &
                                ~EXT4_FC_SUPPORTED_FEATURES) {
                                ret = -EOPNOTSUPP;
                                break;
                        }
-                       if (le32_to_cpu(head->fc_tid) != expected_tid) {
+                       if (le32_to_cpu(head.fc_tid) != expected_tid) {
                                ret = JBD2_FC_REPLAY_STOP;
                                break;
                        }
                        state->fc_cur_tag++;
-                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
-                                       sizeof(*tl) + ext4_fc_tag_len(tl));
+                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+                                           sizeof(tl) + le16_to_cpu(tl.fc_len));
                        break;
                default:
                        ret = state->fc_replay_num_tags ?
@@ -2036,11 +2043,11 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
 {
        struct super_block *sb = journal->j_private;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct ext4_fc_tl *tl;
-       __u8 *start, *end;
+       struct ext4_fc_tl tl;
+       __u8 *start, *end, *cur, *val;
        int ret = JBD2_FC_REPLAY_CONTINUE;
        struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
-       struct ext4_fc_tail *tail;
+       struct ext4_fc_tail tail;
 
        if (pass == PASS_SCAN) {
                state->fc_current_pass = PASS_SCAN;
@@ -2067,49 +2074,52 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
        start = (u8 *)bh->b_data;
        end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
 
-       fc_for_each_tl(start, end, tl) {
+       for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
+               memcpy(&tl, cur, sizeof(tl));
+               val = cur + sizeof(tl);
+
                if (state->fc_replay_num_tags == 0) {
                        ret = JBD2_FC_REPLAY_STOP;
                        ext4_fc_set_bitmaps_and_counters(sb);
                        break;
                }
                jbd_debug(3, "Replay phase, tag:%s\n",
-                               tag2str(le16_to_cpu(tl->fc_tag)));
+                               tag2str(le16_to_cpu(tl.fc_tag)));
                state->fc_replay_num_tags--;
-               switch (le16_to_cpu(tl->fc_tag)) {
+               switch (le16_to_cpu(tl.fc_tag)) {
                case EXT4_FC_TAG_LINK:
-                       ret = ext4_fc_replay_link(sb, tl);
+                       ret = ext4_fc_replay_link(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_UNLINK:
-                       ret = ext4_fc_replay_unlink(sb, tl);
+                       ret = ext4_fc_replay_unlink(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_ADD_RANGE:
-                       ret = ext4_fc_replay_add_range(sb, tl);
+                       ret = ext4_fc_replay_add_range(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_CREAT:
-                       ret = ext4_fc_replay_create(sb, tl);
+                       ret = ext4_fc_replay_create(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_DEL_RANGE:
-                       ret = ext4_fc_replay_del_range(sb, tl);
+                       ret = ext4_fc_replay_del_range(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_INODE:
-                       ret = ext4_fc_replay_inode(sb, tl);
+                       ret = ext4_fc_replay_inode(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_PAD:
                        trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
-                               ext4_fc_tag_len(tl), 0);
+                                            le16_to_cpu(tl.fc_len), 0);
                        break;
                case EXT4_FC_TAG_TAIL:
                        trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0,
-                               ext4_fc_tag_len(tl), 0);
-                       tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
-                       WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid);
+                                            le16_to_cpu(tl.fc_len), 0);
+                       memcpy(&tail, val, sizeof(tail));
+                       WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
                        break;
                case EXT4_FC_TAG_HEAD:
                        break;
                default:
-                       trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0,
-                               ext4_fc_tag_len(tl), 0);
+                       trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0,
+                                            le16_to_cpu(tl.fc_len), 0);
                        ret = -ECANCELED;
                        break;
                }
index b77f70f..937c381 100644 (file)
@@ -153,13 +153,6 @@ struct ext4_fc_replay_state {
 #define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1)
 #endif
 
-#define fc_for_each_tl(__start, __end, __tl)                           \
-       for (tl = (struct ext4_fc_tl *)(__start);                       \
-            (__u8 *)tl < (__u8 *)(__end);                              \
-               tl = (struct ext4_fc_tl *)((__u8 *)tl +                 \
-                                       sizeof(struct ext4_fc_tl) +     \
-                                       + le16_to_cpu(tl->fc_len)))
-
 static inline const char *tag2str(__u16 tag)
 {
        switch (tag) {
@@ -186,16 +179,4 @@ static inline const char *tag2str(__u16 tag)
        }
 }
 
-/* Get length of a particular tlv */
-static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl)
-{
-       return le16_to_cpu(tl->fc_len);
-}
-
-/* Get a pointer to "value" of a tlv */
-static inline __u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl)
-{
-       return (__u8 *)tl + sizeof(*tl);
-}
-
 #endif /* __FAST_COMMIT_H__ */
index 81a17a3..9bab7fd 100644 (file)
@@ -322,14 +322,16 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
        if (is_directory) {
                count = ext4_used_dirs_count(sb, gdp) - 1;
                ext4_used_dirs_set(sb, gdp, count);
-               percpu_counter_dec(&sbi->s_dirs_counter);
+               if (percpu_counter_initialized(&sbi->s_dirs_counter))
+                       percpu_counter_dec(&sbi->s_dirs_counter);
        }
        ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
                                   EXT4_INODES_PER_GROUP(sb) / 8);
        ext4_group_desc_csum_set(sb, block_group, gdp);
        ext4_unlock_group(sb, block_group);
 
-       percpu_counter_inc(&sbi->s_freeinodes_counter);
+       if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
+               percpu_counter_inc(&sbi->s_freeinodes_counter);
        if (sbi->s_log_groups_per_flex) {
                struct flex_groups *fg;
 
index 3239e66..c2c22c2 100644 (file)
@@ -3217,7 +3217,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
                 */
                if (sbi->s_es->s_log_groups_per_flex >= 32) {
                        ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
-                       goto err_freesgi;
+                       goto err_freebuddy;
                }
                sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
                        BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
index afb9d05..a4af26d 100644 (file)
@@ -1376,7 +1376,8 @@ int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
        struct dx_hash_info *hinfo = &name->hinfo;
        int len;
 
-       if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding) {
+       if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding ||
+           (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir))) {
                cf_name->name = NULL;
                return 0;
        }
@@ -1427,7 +1428,8 @@ static bool ext4_match(struct inode *parent,
 #endif
 
 #ifdef CONFIG_UNICODE
-       if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent)) {
+       if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) &&
+           (!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) {
                if (fname->cf_name.name) {
                        struct qstr cf = {.name = fname->cf_name.name,
                                          .len = fname->cf_name.len};
index 7dc94f3..d29f6aa 100644 (file)
@@ -4462,14 +4462,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
 
        if (sb->s_blocksize != blocksize) {
+               /*
+                * bh must be released before kill_bdev(), otherwise
+                * neither it nor its page will be freed. kill_bdev()
+                * is called by sb_set_blocksize().
+                */
+               brelse(bh);
                /* Validate the filesystem blocksize */
                if (!sb_set_blocksize(sb, blocksize)) {
                        ext4_msg(sb, KERN_ERR, "bad block size %d",
                                        blocksize);
+                       bh = NULL;
                        goto failed_mount;
                }
 
-               brelse(bh);
                logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
                offset = do_div(logical_sb_block, blocksize);
                bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
@@ -5202,8 +5208,9 @@ failed_mount:
                kfree(get_qf_name(sb, sbi, i));
 #endif
        fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
-       ext4_blkdev_remove(sbi);
+       /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
        brelse(bh);
+       ext4_blkdev_remove(sbi);
 out_fail:
        sb->s_fs_info = NULL;
        kfree(sbi->s_blockgroup_lock);
index 6f825de..55fcab6 100644 (file)
@@ -315,7 +315,9 @@ EXT4_ATTR_FEATURE(verity);
 #endif
 EXT4_ATTR_FEATURE(metadata_csum_seed);
 EXT4_ATTR_FEATURE(fast_commit);
+#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
 EXT4_ATTR_FEATURE(encrypted_casefold);
+#endif
 
 static struct attribute *ext4_feat_attrs[] = {
        ATTR_LIST(lazy_itable_init),
@@ -333,7 +335,9 @@ static struct attribute *ext4_feat_attrs[] = {
 #endif
        ATTR_LIST(metadata_csum_seed),
        ATTR_LIST(fast_commit),
+#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
        ATTR_LIST(encrypted_casefold),
+#endif
        NULL,
 };
 ATTRIBUTE_GROUPS(ext4_feat);
index 8a35a01..493a83e 100644 (file)
@@ -540,11 +540,9 @@ static vm_fault_t gfs2_fault(struct vm_fault *vmf)
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        vm_fault_t ret;
-       u16 state;
        int err;
 
-       state = (vmf->flags & FAULT_FLAG_WRITE) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-       gfs2_holder_init(ip->i_gl, state, 0, &gh);
+       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        err = gfs2_glock_nq(&gh);
        if (err) {
                ret = block_page_mkwrite_return(err);
index 55efd3d..30dee68 100644 (file)
@@ -735,6 +735,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                __SetPageUptodate(page);
                error = huge_add_to_page_cache(page, mapping, index);
                if (unlikely(error)) {
+                       restore_reserve_on_error(h, &pseudo_vma, addr, page);
                        put_page(page);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        goto out;
index 903458a..fa8794c 100644 (file)
@@ -783,6 +783,11 @@ struct io_task_work {
        task_work_func_t        func;
 };
 
+enum {
+       IORING_RSRC_FILE                = 0,
+       IORING_RSRC_BUFFER              = 1,
+};
+
 /*
  * NOTE! Each of the iocb union members has the file pointer
  * as the first entry in their struct definition. So you can
@@ -8228,6 +8233,7 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
 {
        int i, ret;
 
+       imu->acct_pages = 0;
        for (i = 0; i < nr_pages; i++) {
                if (!PageCompound(pages[i])) {
                        imu->acct_pages++;
@@ -9670,7 +9676,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
                        IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
                        IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
-                       IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
+                       IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
+                       IORING_FEAT_RSRC_TAGS;
 
        if (copy_to_user(params, p, sizeof(*p))) {
                ret = -EFAULT;
@@ -9910,7 +9917,7 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
 }
 
 static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
-                                  unsigned size)
+                                  unsigned size, unsigned type)
 {
        struct io_uring_rsrc_update2 up;
 
@@ -9918,13 +9925,13 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
                return -EINVAL;
        if (copy_from_user(&up, arg, sizeof(up)))
                return -EFAULT;
-       if (!up.nr)
+       if (!up.nr || up.resv)
                return -EINVAL;
-       return __io_register_rsrc_update(ctx, up.type, &up, up.nr);
+       return __io_register_rsrc_update(ctx, type, &up, up.nr);
 }
 
 static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
-                           unsigned int size)
+                           unsigned int size, unsigned int type)
 {
        struct io_uring_rsrc_register rr;
 
@@ -9935,10 +9942,10 @@ static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
        memset(&rr, 0, sizeof(rr));
        if (copy_from_user(&rr, arg, size))
                return -EFAULT;
-       if (!rr.nr)
+       if (!rr.nr || rr.resv || rr.resv2)
                return -EINVAL;
 
-       switch (rr.type) {
+       switch (type) {
        case IORING_RSRC_FILE:
                return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
                                             rr.nr, u64_to_user_ptr(rr.tags));
@@ -9960,8 +9967,10 @@ static bool io_register_op_must_quiesce(int op)
        case IORING_REGISTER_PROBE:
        case IORING_REGISTER_PERSONALITY:
        case IORING_UNREGISTER_PERSONALITY:
-       case IORING_REGISTER_RSRC:
-       case IORING_REGISTER_RSRC_UPDATE:
+       case IORING_REGISTER_FILES2:
+       case IORING_REGISTER_FILES_UPDATE2:
+       case IORING_REGISTER_BUFFERS2:
+       case IORING_REGISTER_BUFFERS_UPDATE:
                return false;
        default:
                return true;
@@ -10087,11 +10096,19 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
        case IORING_REGISTER_RESTRICTIONS:
                ret = io_register_restrictions(ctx, arg, nr_args);
                break;
-       case IORING_REGISTER_RSRC:
-               ret = io_register_rsrc(ctx, arg, nr_args);
+       case IORING_REGISTER_FILES2:
+               ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
+               break;
+       case IORING_REGISTER_FILES_UPDATE2:
+               ret = io_register_rsrc_update(ctx, arg, nr_args,
+                                             IORING_RSRC_FILE);
+               break;
+       case IORING_REGISTER_BUFFERS2:
+               ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
                break;
-       case IORING_REGISTER_RSRC_UPDATE:
-               ret = io_register_rsrc_update(ctx, arg, nr_args);
+       case IORING_REGISTER_BUFFERS_UPDATE:
+               ret = io_register_rsrc_update(ctx, arg, nr_args,
+                                             IORING_RSRC_BUFFER);
                break;
        default:
                ret = -EINVAL;
index cfeaadf..330f657 100644 (file)
@@ -406,7 +406,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
 
        if (cl_init->hostname == NULL) {
                WARN_ON(1);
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        /* see if the client already exists */
index 065cb04..543d916 100644 (file)
@@ -205,6 +205,7 @@ struct nfs4_exception {
        struct inode *inode;
        nfs4_stateid *stateid;
        long timeout;
+       unsigned char task_is_privileged : 1;
        unsigned char delay : 1,
                      recovering : 1,
                      retry : 1;
index 889a9f4..4271938 100644 (file)
@@ -435,8 +435,8 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
                 */
                nfs_mark_client_ready(clp, -EPERM);
        }
-       nfs_put_client(clp);
        clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
+       nfs_put_client(clp);
        return old;
 
 error:
index 0cd9658..e653654 100644 (file)
@@ -589,6 +589,8 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
                goto out_retry;
        }
        if (exception->recovering) {
+               if (exception->task_is_privileged)
+                       return -EDEADLOCK;
                ret = nfs4_wait_clnt_recover(clp);
                if (test_bit(NFS_MIG_FAILED, &server->mig_status))
                        return -EIO;
@@ -614,6 +616,8 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
                goto out_retry;
        }
        if (exception->recovering) {
+               if (exception->task_is_privileged)
+                       return -EDEADLOCK;
                rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
                if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
                        rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
@@ -3878,6 +3882,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
                        server->caps |= NFS_CAP_HARDLINKS;
                if (res.has_symlinks != 0)
                        server->caps |= NFS_CAP_SYMLINKS;
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+               if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
+                       server->caps |= NFS_CAP_SECURITY_LABEL;
+#endif
                if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
                        server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
                if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -3898,10 +3906,6 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
                        server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
                if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
                        server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
-#ifdef CONFIG_NFS_V4_SECURITY_LABEL
-               if (!(res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL))
-                       server->fattr_valid &= ~NFS_ATTR_FATTR_V4_SECURITY_LABEL;
-#endif
                memcpy(server->attr_bitmask_nl, res.attr_bitmask,
                                sizeof(server->attr_bitmask));
                server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
@@ -5968,6 +5972,14 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
        do {
                err = __nfs4_proc_set_acl(inode, buf, buflen);
                trace_nfs4_set_acl(inode, err);
+               if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
+                       /*
+                        * no need to retry since the kernel
+                        * isn't involved in encoding the ACEs.
+                        */
+                       err = -EINVAL;
+                       break;
+               }
                err = nfs4_handle_exception(NFS_SERVER(inode), err,
                                &exception);
        } while (exception.retry);
@@ -6409,6 +6421,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        struct nfs4_exception exception = {
                .inode = data->inode,
                .stateid = &data->stateid,
+               .task_is_privileged = data->args.seq_args.sa_privileged,
        };
 
        if (!nfs4_sequence_done(task, &data->res.seq_res))
@@ -6532,7 +6545,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
        data = kzalloc(sizeof(*data), GFP_NOFS);
        if (data == NULL)
                return -ENOMEM;
-       nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
 
        nfs4_state_protect(server->nfs_client,
                        NFS_SP4_MACH_CRED_CLEANUP,
@@ -6563,6 +6575,12 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
                }
        }
 
+       if (!data->inode)
+               nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+                                  1);
+       else
+               nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+                                  0);
        task_setup_data.callback_data = data;
        msg.rpc_argp = &data->args;
        msg.rpc_resp = &data->res;
@@ -9640,15 +9658,20 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
                        &task_setup_data.rpc_client, &msg);
 
        dprintk("--> %s\n", __func__);
+       lrp->inode = nfs_igrab_and_active(lrp->args.inode);
        if (!sync) {
-               lrp->inode = nfs_igrab_and_active(lrp->args.inode);
                if (!lrp->inode) {
                        nfs4_layoutreturn_release(lrp);
                        return -EAGAIN;
                }
                task_setup_data.flags |= RPC_TASK_ASYNC;
        }
-       nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 0);
+       if (!lrp->inode)
+               nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+                                  1);
+       else
+               nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+                                  0);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
index eb1ef34..ccef43e 100644 (file)
@@ -430,10 +430,6 @@ TRACE_DEFINE_ENUM(O_CLOEXEC);
                { O_NOATIME, "O_NOATIME" }, \
                { O_CLOEXEC, "O_CLOEXEC" })
 
-TRACE_DEFINE_ENUM(FMODE_READ);
-TRACE_DEFINE_ENUM(FMODE_WRITE);
-TRACE_DEFINE_ENUM(FMODE_EXEC);
-
 #define show_fmode_flags(mode) \
        __print_flags(mode, "|", \
                { ((__force unsigned long)FMODE_READ), "READ" }, \
index be5b6d2..64864fb 100644 (file)
@@ -471,7 +471,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                        info_type, fanotify_info_name(info),
                                        info->name_len, buf, count);
                if (ret < 0)
-                       return ret;
+                       goto out_close_fd;
 
                buf += ret;
                count -= ret;
@@ -519,7 +519,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                        fanotify_event_object_fh(event),
                                        info_type, dot, dot_len, buf, count);
                if (ret < 0)
-                       return ret;
+                       goto out_close_fd;
 
                buf += ret;
                count -= ret;
index f17c3d3..7756579 100644 (file)
@@ -1855,6 +1855,45 @@ out:
        return ret;
 }
 
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zeroing starts; it will be rounded up to the
+ *        next block boundary.
+ * len: trimmed to the end of the current cluster if "start + len"
+ *      extends past it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+                                       u64 start, u64 len)
+{
+       int ret;
+       u64 start_block, end_block, nr_blocks;
+       u64 p_block, offset;
+       u32 cluster, p_cluster, nr_clusters;
+       struct super_block *sb = inode->i_sb;
+       u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+       if (start + len < end)
+               end = start + len;
+
+       start_block = ocfs2_blocks_for_bytes(sb, start);
+       end_block = ocfs2_blocks_for_bytes(sb, end);
+       nr_blocks = end_block - start_block;
+       if (!nr_blocks)
+               return 0;
+
+       cluster = ocfs2_bytes_to_clusters(sb, start);
+       ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+                               &nr_clusters, NULL);
+       if (ret)
+               return ret;
+       if (!p_cluster)
+               return 0;
+
+       offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+       p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+       return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 {
        int ret;
        s64 llen;
-       loff_t size;
+       loff_t size, orig_isize;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *di_bh = NULL;
        handle_t *handle;
@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                goto out_inode_unlock;
        }
 
+       orig_isize = i_size_read(inode);
        switch (sr->l_whence) {
        case 0: /*SEEK_SET*/
                break;
@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                sr->l_start += f_pos;
                break;
        case 2: /*SEEK_END*/
-               sr->l_start += i_size_read(inode);
+               sr->l_start += orig_isize;
                break;
        default:
                ret = -EINVAL;
@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        default:
                ret = -EINVAL;
        }
+
+       /* zeroout eof blocks in the cluster. */
+       if (!ret && change_size && orig_isize < size) {
+               ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
+                                       size - orig_isize);
+               if (!ret)
+                       i_size_write(inode, size);
+       }
        up_write(&OCFS2_I(inode)->ip_alloc_sem);
        if (ret) {
                mlog_errno(ret);
@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                goto out_inode_unlock;
        }
 
-       if (change_size && i_size_read(inode) < size)
-               i_size_write(inode, size);
-
        inode->i_ctime = inode->i_mtime = current_time(inode);
        ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
        if (ret < 0)
index 58bbf33..9cbd915 100644 (file)
@@ -2674,6 +2674,13 @@ out:
 }
 
 #ifdef CONFIG_SECURITY
+static int proc_pid_attr_open(struct inode *inode, struct file *file)
+{
+       file->private_data = NULL;
+       __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
+       return 0;
+}
+
 static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
                                  size_t count, loff_t *ppos)
 {
@@ -2704,7 +2711,7 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
        int rv;
 
        /* A task may only write when it was the opener. */
-       if (file->f_cred != current_real_cred())
+       if (file->private_data != current->mm)
                return -EPERM;
 
        rcu_read_lock();
@@ -2754,9 +2761,11 @@ out:
 }
 
 static const struct file_operations proc_pid_attr_operations = {
+       .open           = proc_pid_attr_open,
        .read           = proc_pid_attr_read,
        .write          = proc_pid_attr_write,
        .llseek         = generic_file_llseek,
+       .release        = mem_release,
 };
 
 #define LSM_DIR_OPS(LSM) \
index 40a9c10..1732541 100644 (file)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 #define PERCPU_DECRYPTED_SECTION                                       \
        . = ALIGN(PAGE_SIZE);                                           \
+       *(.data..decrypted)                                             \
        *(.data..percpu..decrypted)                                     \
        . = ALIGN(PAGE_SIZE);
 #else
index fef3ef6..e6526b1 100644 (file)
  * <20:16>  :: Reserved, Shall be set to zero
  * <15:0>   :: USB-IF assigned VID for this cable vendor
  */
+
+/* PD Rev2.0 definition */
+#define IDH_PTYPE_UNDEF                0
+
 /* SOP Product Type (UFP) */
 #define IDH_PTYPE_NOT_UFP       0
 #define IDH_PTYPE_HUB           1
 #define UFP_VDO_VER1_2         2
 
 /* Device Capability */
-#define DEV_USB2_CAPABLE       BIT(0)
-#define DEV_USB2_BILLBOARD     BIT(1)
-#define DEV_USB3_CAPABLE       BIT(2)
-#define DEV_USB4_CAPABLE       BIT(3)
+#define DEV_USB2_CAPABLE       (1 << 0)
+#define DEV_USB2_BILLBOARD     (1 << 1)
+#define DEV_USB3_CAPABLE       (1 << 2)
+#define DEV_USB4_CAPABLE       (1 << 3)
 
 /* Connector Type */
 #define UFP_RECEPTACLE         2
 
 /* Alternate Modes */
 #define UFP_ALTMODE_NOT_SUPP   0
-#define UFP_ALTMODE_TBT3       BIT(0)
-#define UFP_ALTMODE_RECFG      BIT(1)
-#define UFP_ALTMODE_NO_RECFG   BIT(2)
+#define UFP_ALTMODE_TBT3       (1 << 0)
+#define UFP_ALTMODE_RECFG      (1 << 1)
+#define UFP_ALTMODE_NO_RECFG   (1 << 2)
 
 /* USB Highest Speed */
 #define UFP_USB2_ONLY          0
  * <4:0>   :: Port number
  */
 #define DFP_VDO_VER1_1         1
-#define HOST_USB2_CAPABLE      BIT(0)
-#define HOST_USB3_CAPABLE      BIT(1)
-#define HOST_USB4_CAPABLE      BIT(2)
+#define HOST_USB2_CAPABLE      (1 << 0)
+#define HOST_USB3_CAPABLE      (1 << 1)
+#define HOST_USB4_CAPABLE      (1 << 2)
 #define DFP_RECEPTACLE         2
 #define DFP_CAPTIVE            3
 
         | ((pnum) & 0x1f))
 
 /*
- * Passive Cable VDO
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: Reserved, Shall be set to zero
+ * <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive)
+ * <17>    :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10>    :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9>     :: SSTX2 Directionality support
+ * <8>     :: SSRX1 Directionality support
+ * <7>     :: SSRX2 Directionality support
+ * <6:5>   :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4>     :: Vbus through cable (0b == no, 1b == yes)
+ * <3>     :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0>   :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
  * ---------
  * <31:28> :: Cable HW version
  * <27:24> :: Cable FW version
  * <4:3>   :: Reserved, Shall be set to zero
  * <2:0>   :: USB highest speed
  *
- * Active Cable VDO 1
+ * Active Cable VDO 1 (PD Rev3.0+)
  * ---------
  * <31:28> :: Cable HW version
  * <27:24> :: Cable FW version
 #define CABLE_VDO_VER1_0       0
 #define CABLE_VDO_VER1_3       3
 
-/* Connector Type */
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
+#define CABLE_ATYPE            0
+#define CABLE_BTYPE            1
 #define CABLE_CTYPE            2
 #define CABLE_CAPTIVE          3
 
 #define CABLE_CURR_3A          1
 #define CABLE_CURR_5A          2
 
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
+#define CABLE_USBSS_U2_ONLY    0
+#define CABLE_USBSS_U31_GEN1   1
+#define CABLE_USBSS_U31_GEN2   2
+
 /* USB Highest Speed */
 #define CABLE_USB2_ONLY                0
 #define CABLE_USB32_GEN1       1
 #define CABLE_USB32_4_GEN2     2
 #define CABLE_USB4_GEN3                3
 
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+       (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18          \
+        | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10            \
+        | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5         \
+        | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
 #define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd)                        \
        (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21          \
         | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11    \
         | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3           \
         | (iso) << 2 | (gen))
 
+/*
+ * AMA VDO (PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: Reserved, Shall be set to zero
+ * <11>    :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10>    :: SSTX2 Directionality support
+ * <9>     :: SSRX1 Directionality support
+ * <8>     :: SSRX2 Directionality support
+ * <7:5>   :: Vconn power
+ * <4>     :: Vconn power required
+ * <3>     :: Vbus power required
+ * <2:0>   :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+       (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24                        \
+        | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8      \
+        | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3               \
+        | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo)      (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo)       (((vdo) >> 3) & 1)
+
+#define AMA_USBSS_U2_ONLY      0
+#define AMA_USBSS_U31_GEN1     1
+#define AMA_USBSS_U31_GEN2     2
+#define AMA_USBSS_BBONLY       3
+
 /*
  * VPD VDO
  * ---------
index f180240..11e555c 100644 (file)
@@ -37,7 +37,6 @@ bool topology_scale_freq_invariant(void);
 enum scale_freq_source {
        SCALE_FREQ_SOURCE_CPUFREQ = 0,
        SCALE_FREQ_SOURCE_ARCH,
-       SCALE_FREQ_SOURCE_CPPC,
 };
 
 struct scale_freq_data {
index 565deea..8612f8f 100644 (file)
@@ -830,6 +830,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
 struct virtchnl_proto_hdrs {
        u8 tunnel_level;
+       u8 pad[3];
        /**
         * specify where protocol header start from.
         * 0 - from the outer layer
index c043b8d..183ddd5 100644 (file)
  * must end with any of these keywords:
  *   break;
  *   fallthrough;
+ *   continue;
  *   goto <label>;
  *   return [expression];
  *
index 8b2b1d6..136b8d9 100644 (file)
@@ -3,6 +3,7 @@
 #define __LINUX_ENTRYKVM_H
 
 #include <linux/entry-common.h>
+#include <linux/tick.h>
 
 /* Transfer to guest mode work */
 #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
 static inline void xfer_to_guest_mode_prepare(void)
 {
        lockdep_assert_irqs_disabled();
-       rcu_nocb_flush_deferred_wakeup();
+       tick_nohz_user_enter_prepare();
 }
 
 /**
index a8dccd2..ecfbcc0 100644 (file)
@@ -659,6 +659,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
 /* drivers/video/fb_defio.c */
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
 extern void fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_open(struct fb_info *info,
+                               struct inode *inode,
+                               struct file *file);
 extern void fb_deferred_io_cleanup(struct fb_info *info);
 extern int fb_deferred_io_fsync(struct file *file, loff_t start,
                                loff_t end, int datasync);
index 271021e..10e922c 100644 (file)
@@ -1167,8 +1167,7 @@ static inline void hid_hw_wait(struct hid_device *hdev)
  */
 static inline u32 hid_report_len(struct hid_report *report)
 {
-       /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
-       return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
 }
 
 int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
index 232e1bd..9b0487c 100644 (file)
@@ -332,12 +332,30 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);
 
-int __host1x_client_register(struct host1x_client *client,
-                            struct lock_class_key *key);
-#define host1x_client_register(class) \
-       ({ \
-               static struct lock_class_key __key; \
-               __host1x_client_register(class, &__key); \
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client)                     \
+       ({                                              \
+               static struct lock_class_key __key;     \
+               __host1x_client_init(client, &__key);   \
+       })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client)                 \
+       ({                                              \
+               static struct lock_class_key __key;     \
+               __host1x_client_init(client, &__key);   \
+               __host1x_client_register(client);       \
        })
 
 int host1x_client_unregister(struct host1x_client *client);
index 9626fda..2a8ebe6 100644 (file)
@@ -286,6 +286,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
 
 extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;
 
 static inline bool is_huge_zero_page(struct page *page)
 {
@@ -294,7 +295,7 @@ static inline bool is_huge_zero_page(struct page *page)
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
-       return is_huge_zero_page(pmd_page(pmd));
+       return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
 }
 
 static inline bool is_huge_zero_pud(pud_t pud)
@@ -440,6 +441,11 @@ static inline bool is_huge_zero_page(struct page *page)
        return false;
 }
 
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+       return false;
+}
+
 static inline bool is_huge_zero_pud(pud_t pud)
 {
        return false;
index b92f25c..6504346 100644 (file)
@@ -149,6 +149,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
 bool isolate_huge_page(struct page *page, struct list_head *list);
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
 void putback_active_hugepage(struct page *page);
 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
 void free_huge_page(struct page *page);
@@ -339,6 +340,11 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
        return false;
 }
 
+static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+       return 0;
+}
+
 static inline void putback_active_hugepage(struct page *page)
 {
 }
@@ -604,6 +610,8 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+                               unsigned long address, struct page *page);
 
 /* arch callback */
 int __init __alloc_bootmem_huge_page(struct hstate *h);
index 76102ef..8583ed3 100644 (file)
@@ -1185,7 +1185,15 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
 static inline unsigned long
 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-       return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+       /*
+        * The index was checked originally in search_memslots.  To avoid
+        * that a malicious guest builds a Spectre gadget out of e.g. page
+        * table walks, do not let the processor speculate loads outside
+        * the guest's registered memslots.
+        */
+       unsigned long offset = gfn - slot->base_gfn;
+       offset = array_index_nospec(offset, slot->npages);
+       return slot->userspace_addr + offset * PAGE_SIZE;
 }
 
 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
index a57af87..4a59664 100644 (file)
@@ -26,9 +26,7 @@ struct bd70528_data {
        struct mutex rtc_timer_lock;
 };
 
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
+#define BD70528_BUCK_VOLTS 0x10
 #define BD70528_LDO_VOLTS 0x20
 
 #define BD70528_REG_BUCK1_EN   0x0F
index c7ab69c..3b5f3a7 100644 (file)
@@ -26,11 +26,11 @@ enum {
        BD71828_REGULATOR_AMOUNT,
 };
 
-#define BD71828_BUCK1267_VOLTS         0xEF
-#define BD71828_BUCK3_VOLTS            0x10
-#define BD71828_BUCK4_VOLTS            0x20
-#define BD71828_BUCK5_VOLTS            0x10
-#define BD71828_LDO_VOLTS              0x32
+#define BD71828_BUCK1267_VOLTS         0x100
+#define BD71828_BUCK3_VOLTS            0x20
+#define BD71828_BUCK4_VOLTS            0x40
+#define BD71828_BUCK5_VOLTS            0x20
+#define BD71828_LDO_VOLTS              0x40
 /* LDO6 is fixed 1.8V voltage */
 #define BD71828_LDO_6_VOLTAGE          1800000
 
index 236a7d0..30bb59f 100644 (file)
@@ -630,6 +630,7 @@ struct mlx4_caps {
        bool                    wol_port[MLX4_MAX_PORTS + 1];
        struct mlx4_rate_limit_caps rl_caps;
        u32                     health_buffer_addrs;
+       bool                    map_clock_to_user;
 };
 
 struct mlx4_buf_list {
index 020a8f7..f8902bc 100644 (file)
@@ -542,6 +542,10 @@ struct mlx5_core_roce {
 enum {
        MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
        MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+       /* Set during device detach to block any further devices
+        * creation/deletion on drivers rescan. Unset during device attach.
+        */
+       MLX5_PRIV_FLAGS_DETACH = 1 << 2,
 };
 
 struct mlx5_adev {
index 6d16eed..eb86e80 100644 (file)
@@ -1289,6 +1289,8 @@ enum mlx5_fc_bulk_alloc_bitmask {
 
 #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
 
+#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
+
 enum {
        MLX5_STEERING_FORMAT_CONNECTX_5   = 0,
        MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
index 028f442..60ffeb6 100644 (file)
@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
                         struct mlx5_hairpin_params *params);
 
 void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
 #endif /* __TRANSOBJ_H__ */
index c274f75..8ae3162 100644 (file)
@@ -1719,6 +1719,7 @@ struct zap_details {
        struct address_space *check_mapping;    /* Check page->mapping if set */
        pgoff_t first_index;                    /* Lowest page->index to unmap */
        pgoff_t last_index;                     /* Highest page->index to unmap */
+       struct page *single_page;               /* Locked page to be unmapped */
 };
 
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -1766,6 +1767,7 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 extern int fixup_user_fault(struct mm_struct *mm,
                            unsigned long address, unsigned int fault_flags,
                            bool *unlocked);
+void unmap_mapping_page(struct page *page);
 void unmap_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t nr, bool even_cows);
 void unmap_mapping_range(struct address_space *mapping,
@@ -1786,6 +1788,7 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
        BUG();
        return -EFAULT;
 }
+static inline void unmap_mapping_page(struct page *page) { }
 static inline void unmap_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t nr, bool even_cows) { }
 static inline void unmap_mapping_range(struct address_space *mapping,
index 5aacc1c..8f0fb62 100644 (file)
@@ -445,13 +445,6 @@ struct mm_struct {
                 */
                atomic_t has_pinned;
 
-               /**
-                * @write_protect_seq: Locked when any thread is write
-                * protecting pages mapped by this mm to enforce a later COW,
-                * for instance during page table copying for fork().
-                */
-               seqcount_t write_protect_seq;
-
 #ifdef CONFIG_MMU
                atomic_long_t pgtables_bytes;   /* PTE page table pages */
 #endif
@@ -460,6 +453,18 @@ struct mm_struct {
                spinlock_t page_table_lock; /* Protects page tables and some
                                             * counters
                                             */
+               /*
+                * With some kernel config, the current mmap_lock's offset
+                * inside 'mm_struct' is at 0x120, which is very optimal, as
+                * its two hot fields 'count' and 'owner' sit in 2 different
+                * cachelines,  and when mmap_lock is highly contended, both
+                * of the 2 fields will be accessed frequently, current layout
+                * will help to reduce cache bouncing.
+                *
+                * So please be careful with adding new fields before
+                * mmap_lock, which can easily push the 2 fields into one
+                * cacheline.
+                */
                struct rw_semaphore mmap_lock;
 
                struct list_head mmlist; /* List of maybe swapped mm's. These
@@ -480,7 +485,15 @@ struct mm_struct {
                unsigned long stack_vm;    /* VM_STACK */
                unsigned long def_flags;
 
+               /**
+                * @write_protect_seq: Locked when any thread is write
+                * protecting pages mapped by this mm to enforce a later COW,
+                * for instance during page table copying for fork().
+                */
+               seqcount_t write_protect_seq;
+
                spinlock_t arg_lock; /* protect the below fields */
+
                unsigned long start_code, end_code, start_data, end_data;
                unsigned long start_brk, brk, start_stack;
                unsigned long arg_start, arg_end, env_start, env_end;
index c20211e..2430650 100644 (file)
@@ -2344,6 +2344,7 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
 struct device_node;
 struct irq_domain;
 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
+bool pci_host_of_has_msi_map(struct device *dev);
 
 /* Arch may override this (weak) */
 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2351,6 +2352,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
 #else  /* CONFIG_OF */
 static inline struct irq_domain *
 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
 #endif  /* CONFIG_OF */
 
 static inline struct device_node *
index 46b1378..a43047b 100644 (file)
@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
  * To be differentiate with macro pte_mkyoung, this macro is used on platforms
  * where software maintains page access bit.
  */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+       return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
+
 #ifndef pte_savedwrite
 #define pte_savedwrite pte_write
 #endif
index fafc1be..9837fb0 100644 (file)
@@ -50,6 +50,7 @@ struct sysc_regbits {
        s8 emufree_shift;
 };
 
+#define SYSC_QUIRK_REINIT_ON_RESUME    BIT(27)
 #define SYSC_QUIRK_GPMC_DEBUG          BIT(26)
 #define SYSC_MODULE_QUIRK_ENA_RESETDONE        BIT(25)
 #define SYSC_MODULE_QUIRK_PRUSS                BIT(24)
index 0d47fd3..51d7f1b 100644 (file)
@@ -235,7 +235,7 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
  * @ppm:    Parts per million, but with a 16 bit binary fractional field
  */
 
-extern s32 scaled_ppm_to_ppb(long ppm);
+extern long scaled_ppm_to_ppb(long ppm);
 
 /**
  * ptp_find_pin() - obtain the pin index of a given auxiliary function
index def5c62..8d04e7d 100644 (file)
@@ -91,6 +91,7 @@ enum ttu_flags {
 
        TTU_SPLIT_HUGE_PMD      = 0x4,  /* split huge PMD if any */
        TTU_IGNORE_MLOCK        = 0x8,  /* ignore mlock */
+       TTU_SYNC                = 0x10, /* avoid racy checks with PVMW_SYNC */
        TTU_IGNORE_HWPOISON     = 0x20, /* corrupted page is recoverable */
        TTU_BATCH_FLUSH         = 0x40, /* Batch TLB flushes where possible
                                         * and caller guarantees they will
index 6f155f9..4ab7bfc 100644 (file)
@@ -1109,6 +1109,7 @@ struct pcr_ops {
 };
 
 enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+enum ASPM_MODE  {ASPM_MODE_CFG, ASPM_MODE_REG};
 
 #define ASPM_L1_1_EN                   BIT(0)
 #define ASPM_L1_2_EN                   BIT(1)
@@ -1234,6 +1235,7 @@ struct rtsx_pcr {
        u8                              card_drive_sel;
 #define ASPM_L1_EN                     0x02
        u8                              aspm_en;
+       enum ASPM_MODE                  aspm_mode;
        bool                            aspm_enabled;
 
 #define PCR_MS_PMOS                    (1 << 0)
index d2c8813..28a98fc 100644 (file)
@@ -350,11 +350,19 @@ struct load_weight {
  * Only for tasks we track a moving average of the past instantaneous
  * estimated utilization. This allows to absorb sporadic drops in utilization
  * of an otherwise almost periodic task.
+ *
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB bit of util_est.enqueued at dequeue
+ * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
+ * for a task) it is safe to use MSB.
  */
 struct util_est {
        unsigned int                    enqueued;
        unsigned int                    ewma;
 #define UTIL_EST_WEIGHT_SHIFT          2
+#define UTIL_AVG_UNCHANGED             0x80000000
 } __attribute__((__aligned__(sizeof(u64))));
 
 /*
index b8fc5c5..0d8e3dc 100644 (file)
@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
                            int __user *usockvec);
 extern int __sys_shutdown_sock(struct socket *sock, int how);
 extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
 #endif /* _LINUX_SOCKET_H */
index d9b7c91..6430a94 100644 (file)
 #define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
 #define SWP_OFFSET_MASK        ((1UL << SWP_TYPE_SHIFT) - 1)
 
+/* Clear all flags but only keep swp_entry_t related information */
+static inline pte_t pte_swp_clear_flags(pte_t pte)
+{
+       if (pte_swp_soft_dirty(pte))
+               pte = pte_swp_clear_soft_dirty(pte);
+       if (pte_swp_uffd_wp(pte))
+               pte = pte_swp_clear_uffd_wp(pte);
+       return pte;
+}
+
 /*
  * Store a type+offset into a swp_entry_t in an arch-independent format
  */
@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
 {
        swp_entry_t arch_entry;
 
-       if (pte_swp_soft_dirty(pte))
-               pte = pte_swp_clear_soft_dirty(pte);
-       if (pte_swp_uffd_wp(pte))
-               pte = pte_swp_clear_uffd_wp(pte);
+       pte = pte_swp_clear_flags(pte);
        arch_entry = __pte_to_swp_entry(pte);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
index 7340613..1a0ff88 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
 #include <linux/sched.h>
+#include <linux/rcupdate.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void __init tick_init(void);
@@ -300,4 +301,10 @@ static inline void tick_nohz_task_switch(void)
                __tick_nohz_task_switch();
 }
 
+static inline void tick_nohz_user_enter_prepare(void)
+{
+       if (tick_nohz_full_cpu(smp_processor_id()))
+               rcu_nocb_flush_deferred_wakeup();
+}
+
 #endif
index bf00259..96b7ff6 100644 (file)
@@ -460,7 +460,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
 #define PD_T_RECEIVER_RESPONSE 15      /* 15ms max */
 #define PD_T_SOURCE_ACTIVITY   45
 #define PD_T_SINK_ACTIVITY     135
-#define PD_T_SINK_WAIT_CAP     240
+#define PD_T_SINK_WAIT_CAP     310     /* 310 - 620 ms */
 #define PD_T_PS_TRANSITION     500
 #define PD_T_SRC_TRANSITION    35
 #define PD_T_DRP_SNK           40
index 0eb83ce..b517ebc 100644 (file)
@@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields {
 #define USB_PD_EXT_SDB_EVENT_OVP               BIT(3)
 #define USB_PD_EXT_SDB_EVENT_CF_CV_MODE                BIT(4)
 
-#define USB_PD_EXT_SDB_PPS_EVENTS      (USB_PD_EXT_SDB_EVENT_OCP |     \
-                                        USB_PD_EXT_SDB_EVENT_OTP |     \
-                                        USB_PD_EXT_SDB_EVENT_OVP)
-
 #endif /* __LINUX_USB_PD_EXT_SDB_H */
index 48ecca8..b655d86 100644 (file)
@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
  * The link_support layer is used to add any Link Layer specific
  * framing.
  */
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                        struct cflayer *link_support, int head_room,
                        struct cflayer **layer, int (**rcv_func)(
                                struct sk_buff *, struct net_device *,
index 2aa5e91..8819ff4 100644 (file)
@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
  * @fcs:       Specify if checksum is used in CAIF Framing Layer.
  * @head_room: Head space needed by link specific protocol.
  */
-void
+int
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                     struct net_device *dev, struct cflayer *phy_layer,
                     enum cfcnfg_phy_preference pref,
index 14a55e0..67cce87 100644 (file)
@@ -9,4 +9,5 @@
 #include <net/caif/caif_layer.h>
 
 struct cflayer *cfserl_create(int instance, bool use_stx);
+void cfserl_release(struct cflayer *layer);
 #endif
index 445b66c..e89530d 100644 (file)
@@ -5537,7 +5537,7 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
  *
  * This function iterates over the interfaces associated with a given
  * hardware that are currently active and calls the callback for them.
- * This version can only be used while holding the RTNL.
+ * This version can only be used while holding the wiphy mutex.
  *
  * @hw: the hardware struct of which the interfaces should be iterated over
  * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
@@ -6392,7 +6392,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
 
 /**
  * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
- *                              of injected frames
+ *                              of injected frames.
+ *
+ * To accurately parse and take into account rate and retransmission fields,
+ * you must initialize the chandef field in the ieee80211_tx_info structure
+ * of the skb before calling this function.
+ *
  * @skb: packet injected by userspace
  * @dev: the &struct device of this 802.11 device
  */
index fa58871..bdc0459 100644 (file)
@@ -184,6 +184,9 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
 void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
 
 void net_ns_barrier(void);
+
+struct ns_common *get_net_ns(struct ns_common *ns);
+struct net *get_net_ns_by_fd(int fd);
 #else /* CONFIG_NET_NS */
 #include <linux/sched.h>
 #include <linux/nsproxy.h>
@@ -203,13 +206,22 @@ static inline void net_ns_get_ownership(const struct net *net,
 }
 
 static inline void net_ns_barrier(void) {}
+
+static inline struct ns_common *get_net_ns(struct ns_common *ns)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline struct net *get_net_ns_by_fd(int fd)
+{
+       return ERR_PTR(-EINVAL);
+}
 #endif /* CONFIG_NET_NS */
 
 
 extern struct list_head net_namespace_list;
 
 struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int fd);
 
 #ifdef CONFIG_SYSCTL
 void ipx_register_sysctl(void);
index 27eeb61..0a5655e 100644 (file)
@@ -1506,16 +1506,10 @@ struct nft_trans_chain {
 
 struct nft_trans_table {
        bool                            update;
-       u8                              state;
-       u32                             flags;
 };
 
 #define nft_trans_table_update(trans)  \
        (((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_state(trans)   \
-       (((struct nft_trans_table *)trans->data)->state)
-#define nft_trans_table_flags(trans)   \
-       (((struct nft_trans_table *)trans->data)->flags)
 
 struct nft_trans_elem {
        struct nft_set                  *set;
index 0e962d8..7a7058f 100644 (file)
@@ -1934,7 +1934,8 @@ static inline u32 net_tx_rndhash(void)
 
 static inline void sk_set_txhash(struct sock *sk)
 {
-       sk->sk_txhash = net_tx_rndhash();
+       /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+       WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
 }
 
 static inline bool sk_rethink_txhash(struct sock *sk)
@@ -2206,9 +2207,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
 
 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
 {
-       if (sk->sk_txhash) {
+       /* This pairs with WRITE_ONCE() in sk_set_txhash() */
+       u32 txhash = READ_ONCE(sk->sk_txhash);
+
+       if (txhash) {
                skb->l4_hash = 1;
-               skb->hash = sk->sk_txhash;
+               skb->hash = txhash;
        }
 }
 
@@ -2266,8 +2270,13 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
 static inline int sock_error(struct sock *sk)
 {
        int err;
-       if (likely(!sk->sk_err))
+
+       /* Avoid an atomic operation for the common case.
+        * This is racy since another cpu/thread can change sk_err under us.
+        */
+       if (likely(data_race(!sk->sk_err)))
                return 0;
+
        err = xchg(&sk->sk_err, 0);
        return -err;
 }
index 3eccb52..8341a8d 100644 (file)
@@ -193,7 +193,11 @@ struct tls_offload_context_tx {
        (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
 
 enum tls_context_flags {
-       TLS_RX_SYNC_RUNNING = 0,
+       /* tls_device_down was called after the netdev went down, device state
+        * was released, and kTLS works in software, even though rx_conf is
+        * still TLS_HW (needed for transition).
+        */
+       TLS_RX_DEV_DEGRADED = 0,
        /* Unlike RX where resync is driven entirely by the core in TX only
         * the driver knows when things went out of sync, so we need the flag
         * to be atomic.
@@ -266,6 +270,7 @@ struct tls_context {
 
        /* cache cold stuff */
        struct proto *sk_proto;
+       struct sock *sk;
 
        void (*sk_destruct)(struct sock *sk);
 
@@ -448,6 +453,9 @@ static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
 struct sk_buff *
 tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
                      struct sk_buff *skb);
+struct sk_buff *
+tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
+                        struct sk_buff *skb);
 
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
index 6de5a7f..d2a9420 100644 (file)
@@ -863,8 +863,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
 __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
 #define __NR_mount_setattr 442
 __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
 
 #define __NR_landlock_create_ruleset 444
 __SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
index 7d66876..d1b3270 100644 (file)
@@ -289,6 +289,9 @@ struct sockaddr_in {
 /* Address indicating an error return. */
 #define        INADDR_NONE             ((unsigned long int) 0xffffffff)
 
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define        INADDR_DUMMY            ((unsigned long int) 0xc0000008)
+
 /* Network number for local host loopback. */
 #define        IN_LOOPBACKNET          127
 
index ee93428..225ec87 100644 (file)
 #define KEY_VOICECOMMAND               0x246   /* Listening Voice Command */
 #define KEY_ASSISTANT          0x247   /* AL Context-aware desktop assistant */
 #define KEY_KBD_LAYOUT_NEXT    0x248   /* AC Next Keyboard Layout Select */
+#define KEY_EMOJI_PICKER       0x249   /* Show/hide emoji picker (HUTRR101) */
 
 #define KEY_BRIGHTNESS_MIN             0x250   /* Set Brightness to Minimum */
 #define KEY_BRIGHTNESS_MAX             0x251   /* Set Brightness to Maximum */
index e1ae466..162ff99 100644 (file)
@@ -280,6 +280,7 @@ struct io_uring_params {
 #define IORING_FEAT_SQPOLL_NONFIXED    (1U << 7)
 #define IORING_FEAT_EXT_ARG            (1U << 8)
 #define IORING_FEAT_NATIVE_WORKERS     (1U << 9)
+#define IORING_FEAT_RSRC_TAGS          (1U << 10)
 
 /*
  * io_uring_register(2) opcodes and arguments
@@ -298,8 +299,12 @@ enum {
        IORING_UNREGISTER_PERSONALITY           = 10,
        IORING_REGISTER_RESTRICTIONS            = 11,
        IORING_REGISTER_ENABLE_RINGS            = 12,
-       IORING_REGISTER_RSRC                    = 13,
-       IORING_REGISTER_RSRC_UPDATE             = 14,
+
+       /* extended with tagging */
+       IORING_REGISTER_FILES2                  = 13,
+       IORING_REGISTER_FILES_UPDATE2           = 14,
+       IORING_REGISTER_BUFFERS2                = 15,
+       IORING_REGISTER_BUFFERS_UPDATE          = 16,
 
        /* this goes last */
        IORING_REGISTER_LAST
@@ -312,14 +317,10 @@ struct io_uring_files_update {
        __aligned_u64 /* __s32 * */ fds;
 };
 
-enum {
-       IORING_RSRC_FILE                = 0,
-       IORING_RSRC_BUFFER              = 1,
-};
-
 struct io_uring_rsrc_register {
-       __u32 type;
        __u32 nr;
+       __u32 resv;
+       __u64 resv2;
        __aligned_u64 data;
        __aligned_u64 tags;
 };
@@ -335,8 +336,8 @@ struct io_uring_rsrc_update2 {
        __u32 resv;
        __aligned_u64 data;
        __aligned_u64 tags;
-       __u32 type;
        __u32 nr;
+       __u32 resv2;
 };
 
 /* Skip updating fd indexes set to this value in the fd table */
index f0c35ce..4fe842c 100644 (file)
@@ -54,7 +54,7 @@
 #define VIRTIO_ID_SOUND                        25 /* virtio sound */
 #define VIRTIO_ID_FS                   26 /* virtio filesystem */
 #define VIRTIO_ID_PMEM                 27 /* virtio pmem */
-#define VIRTIO_ID_BT                   28 /* virtio bluetooth */
 #define VIRTIO_ID_MAC80211_HWSIM       29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_BT                   40 /* virtio bluetooth */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
index eb01e12..e9c42a1 100644 (file)
@@ -1537,7 +1537,7 @@ static noinline void __init kernel_init_freeable(void)
         */
        set_mems_allowed(node_states[N_MEMORY]);
 
-       cad_pid = task_pid(current);
+       cad_pid = get_pid(task_pid(current));
 
        smp_prepare_cpus(setup_max_cpus);
 
index 7344349..a2f1f15 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/jiffies.h>
 #include <linux/pid_namespace.h>
 #include <linux/proc_ns.h>
+#include <linux/security.h>
 
 #include "../../lib/kstrtox.h"
 
@@ -1069,11 +1070,13 @@ bpf_base_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return &bpf_probe_read_kernel_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return &bpf_probe_read_kernel_str_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_str_proto;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
        case BPF_FUNC_snprintf:
index 94ba516..c6a2757 100644 (file)
@@ -6483,6 +6483,27 @@ struct bpf_sanitize_info {
        bool mask_to_left;
 };
 
+static struct bpf_verifier_state *
+sanitize_speculative_path(struct bpf_verifier_env *env,
+                         const struct bpf_insn *insn,
+                         u32 next_idx, u32 curr_idx)
+{
+       struct bpf_verifier_state *branch;
+       struct bpf_reg_state *regs;
+
+       branch = push_stack(env, next_idx, curr_idx, true);
+       if (branch && insn) {
+               regs = branch->frame[branch->curframe]->regs;
+               if (BPF_SRC(insn->code) == BPF_K) {
+                       mark_reg_unknown(env, regs, insn->dst_reg);
+               } else if (BPF_SRC(insn->code) == BPF_X) {
+                       mark_reg_unknown(env, regs, insn->dst_reg);
+                       mark_reg_unknown(env, regs, insn->src_reg);
+               }
+       }
+       return branch;
+}
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                            struct bpf_insn *insn,
                            const struct bpf_reg_state *ptr_reg,
@@ -6566,12 +6587,26 @@ do_sim:
                tmp = *dst_reg;
                *dst_reg = *ptr_reg;
        }
-       ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+       ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
+                                       env->insn_idx);
        if (!ptr_is_dst_reg && ret)
                *dst_reg = tmp;
        return !ret ? REASON_STACK : 0;
 }
 
+static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
+{
+       struct bpf_verifier_state *vstate = env->cur_state;
+
+       /* If we simulate paths under speculation, we don't update the
+        * insn as 'seen' such that when we verify unreachable paths in
+        * the non-speculative domain, sanitize_dead_code() can still
+        * rewrite/sanitize them.
+        */
+       if (!vstate->speculative)
+               env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+}
+
 static int sanitize_err(struct bpf_verifier_env *env,
                        const struct bpf_insn *insn, int reason,
                        const struct bpf_reg_state *off_reg,
@@ -8750,14 +8785,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                if (err)
                        return err;
        }
+
        if (pred == 1) {
-               /* only follow the goto, ignore fall-through */
+               /* Only follow the goto, ignore fall-through. If needed, push
+                * the fall-through branch for simulation under speculative
+                * execution.
+                */
+               if (!env->bypass_spec_v1 &&
+                   !sanitize_speculative_path(env, insn, *insn_idx + 1,
+                                              *insn_idx))
+                       return -EFAULT;
                *insn_idx += insn->off;
                return 0;
        } else if (pred == 0) {
-               /* only follow fall-through branch, since
-                * that's where the program will go
+               /* Only follow the fall-through branch, since that's where the
+                * program will go. If needed, push the goto branch for
+                * simulation under speculative execution.
                 */
+               if (!env->bypass_spec_v1 &&
+                   !sanitize_speculative_path(env, insn,
+                                              *insn_idx + insn->off + 1,
+                                              *insn_idx))
+                       return -EFAULT;
                return 0;
        }
 
@@ -10630,7 +10679,7 @@ static int do_check(struct bpf_verifier_env *env)
                }
 
                regs = cur_regs(env);
-               env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+               sanitize_mark_insn_seen(env);
                prev_insn_idx = env->insn_idx;
 
                if (class == BPF_ALU || class == BPF_ALU64) {
@@ -10857,7 +10906,7 @@ process_bpf_exit:
                                        return err;
 
                                env->insn_idx++;
-                               env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+                               sanitize_mark_insn_seen(env);
                        } else {
                                verbose(env, "invalid BPF_LD mode\n");
                                return -EINVAL;
@@ -11366,6 +11415,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
 {
        struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
        struct bpf_insn *insn = new_prog->insnsi;
+       u32 old_seen = old_data[off].seen;
        u32 prog_len;
        int i;
 
@@ -11386,7 +11436,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
        memcpy(new_data + off + cnt - 1, old_data + off,
               sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
        for (i = off; i < off + cnt - 1; i++) {
-               new_data[i].seen = env->pass_cnt;
+               /* Expand insni[off]'s seen count to the patched range. */
+               new_data[i].seen = old_seen;
                new_data[i].zext_dst = insn_has_def32(env, insn + i);
        }
        env->insn_aux_data = new_data;
@@ -12710,6 +12761,9 @@ static void free_states(struct bpf_verifier_env *env)
  * insn_aux_data was touched. These variables are compared to clear temporary
  * data from failed pass. For testing and experiments do_check_common() can be
  * run multiple times even when prior attempt to verify is unsuccessful.
+ *
+ * Note that special handling is needed on !env->bypass_spec_v1 if this is
+ * ever called outside of error path with subsequent program rejection.
  */
 static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
 {
index 8190b6b..1f274d7 100644 (file)
@@ -820,6 +820,10 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
        struct cgroup *cgrp = kn->priv;
        int ret;
 
+       /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
+       if (strchr(new_name_str, '\n'))
+               return -EINVAL;
+
        if (kernfs_type(kn) != KERNFS_DIR)
                return -ENOTDIR;
        if (kn->parent != new_parent)
index 825284b..684a606 100644 (file)
@@ -464,6 +464,7 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
        VMCOREINFO_STRUCT_SIZE(mem_section);
        VMCOREINFO_OFFSET(mem_section, section_mem_map);
+       VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
        VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
 #endif
        VMCOREINFO_STRUCT_SIZE(page);
index a0b3b04..bf16395 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/highmem.h>
 #include <linux/livepatch.h>
 #include <linux/audit.h>
+#include <linux/tick.h>
 
 #include "common.h"
 
@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                local_irq_disable_exit_to_user();
 
                /* Check if any of the above work has queued a deferred wakeup */
-               rcu_nocb_flush_deferred_wakeup();
+               tick_nohz_user_enter_prepare();
 
                ti_work = READ_ONCE(current_thread_info()->flags);
        }
@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
        lockdep_assert_irqs_disabled();
 
        /* Flush pending rcuog wakeup before the last need_resched() check */
-       rcu_nocb_flush_deferred_wakeup();
+       tick_nohz_user_enter_prepare();
 
        if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
                ti_work = exit_to_user_mode_loop(regs, ti_work);
index 6fee4a7..fe88d6e 100644 (file)
@@ -4609,7 +4609,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                ctx = &cpuctx->ctx;
                get_ctx(ctx);
+               raw_spin_lock_irqsave(&ctx->lock, flags);
                ++ctx->pin_count;
+               raw_spin_unlock_irqrestore(&ctx->lock, flags);
 
                return ctx;
        }
index 23a7a0b..db8c248 100644 (file)
@@ -70,9 +70,6 @@ bool irq_work_queue(struct irq_work *work)
        if (!irq_work_claim(work))
                return false;
 
-       /*record irq_work call stack in order to print it in KASAN reports*/
-       kasan_record_aux_stack(work);
-
        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
index 7a14146..9423218 100644 (file)
@@ -391,6 +391,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
        /* No obstacles. */
        return vprintk_default(fmt, args);
 }
+EXPORT_SYMBOL(vprintk);
 
 void __init printk_safe_init(void)
 {
@@ -411,4 +412,3 @@ void __init printk_safe_init(void)
        /* Flush pending messages that did not have scheduled IRQ works. */
        printk_safe_flush();
 }
-EXPORT_SYMBOL(vprintk);
index 5226cc2..4ca80df 100644 (file)
@@ -6389,7 +6389,6 @@ int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
 {
        return __sched_setscheduler(p, attr, false, true);
 }
-EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
 
 /**
  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
index 9c882f2..c5aacbd 100644 (file)
@@ -885,6 +885,7 @@ static const struct seq_operations sched_debug_sops = {
 #define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
 #define __P(F) __PS(#F, F)
 #define   P(F) __PS(#F, p->F)
+#define   PM(F, M) __PS(#F, p->F & (M))
 #define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
 #define __PN(F) __PSN(#F, F)
 #define   PN(F) __PSN(#F, p->F)
@@ -1011,7 +1012,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
        P(se.avg.util_avg);
        P(se.avg.last_update_time);
        P(se.avg.util_est.ewma);
-       P(se.avg.util_est.enqueued);
+       PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
 #endif
 #ifdef CONFIG_UCLAMP_TASK
        __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
index 3248e24..bfaa6e1 100644 (file)
@@ -3298,6 +3298,24 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_FAIR_GROUP_SCHED
+
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+       if (cfs_rq->load.weight)
+               return false;
+
+       if (cfs_rq->avg.load_sum)
+               return false;
+
+       if (cfs_rq->avg.util_sum)
+               return false;
+
+       if (cfs_rq->avg.runnable_sum)
+               return false;
+
+       return true;
+}
+
 /**
  * update_tg_load_avg - update the tg's load avg
  * @cfs_rq: the cfs_rq whose avg changed
@@ -3499,10 +3517,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+       long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
        unsigned long load_avg;
        u64 load_sum = 0;
-       s64 delta_sum;
        u32 divider;
 
        if (!runnable_sum)
@@ -3549,13 +3566,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
        load_sum = (s64)se_weight(se) * runnable_sum;
        load_avg = div_s64(load_sum, divider);
 
-       delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
-       delta_avg = load_avg - se->avg.load_avg;
+       delta = load_avg - se->avg.load_avg;
 
        se->avg.load_sum = runnable_sum;
        se->avg.load_avg = load_avg;
-       add_positive(&cfs_rq->avg.load_avg, delta_avg);
-       add_positive(&cfs_rq->avg.load_sum, delta_sum);
+
+       add_positive(&cfs_rq->avg.load_avg, delta);
+       cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3766,11 +3783,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+       /*
+        * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+        * See ___update_load_avg() for details.
+        */
+       u32 divider = get_pelt_divider(&cfs_rq->avg);
+
        dequeue_load_avg(cfs_rq, se);
        sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-       sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+       cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
        sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-       sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+       cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
 
        add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
@@ -3902,7 +3925,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
 {
        struct util_est ue = READ_ONCE(p->se.avg.util_est);
 
-       return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
+       return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
 }
 
 static inline unsigned long task_util_est(struct task_struct *p)
@@ -4002,7 +4025,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
         * Reset EWMA on utilization increases, the moving average is used only
         * to smooth utilization decreases.
         */
-       ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
+       ue.enqueued = task_util(p);
        if (sched_feat(UTIL_EST_FASTUP)) {
                if (ue.ewma < ue.enqueued) {
                        ue.ewma = ue.enqueued;
@@ -4051,6 +4074,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
        ue.ewma  += last_ewma_diff;
        ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
 done:
+       ue.enqueued |= UTIL_AVG_UNCHANGED;
        WRITE_ONCE(p->se.avg.util_est, ue);
 
        trace_sched_util_est_se_tp(&p->se);
@@ -4085,6 +4109,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 
 #else /* CONFIG_SMP */
 
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+       return true;
+}
+
 #define UPDATE_TG      0x0
 #define SKIP_AGE_LOAD  0x0
 #define DO_ATTACH      0x0
@@ -4743,8 +4772,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
                cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
                                             cfs_rq->throttled_clock_task;
 
-               /* Add cfs_rq with already running entity in the list */
-               if (cfs_rq->nr_running >= 1)
+               /* Add cfs_rq with load or one or more already running entities to the list */
+               if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
                        list_add_leaf_cfs_rq(cfs_rq);
        }
 
@@ -7990,23 +8019,6 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-{
-       if (cfs_rq->load.weight)
-               return false;
-
-       if (cfs_rq->avg.load_sum)
-               return false;
-
-       if (cfs_rq->avg.util_sum)
-               return false;
-
-       if (cfs_rq->avg.runnable_sum)
-               return false;
-
-       return true;
-}
-
 static bool __update_blocked_fair(struct rq *rq, bool *done)
 {
        struct cfs_rq *cfs_rq, *pos;
@@ -8030,7 +8042,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
                /* Propagate pending load changes to the parent, if any: */
                se = cfs_rq->tg->se[cpu];
                if (se && !skip_blocked_update(se))
-                       update_load_avg(cfs_rq_of(se), se, 0);
+                       update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
 
                /*
                 * There can be a lot of idle CPU cgroups.  Don't let fully
index 1462846..cfe94ff 100644 (file)
@@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
        return LOAD_AVG_MAX - 1024 + avg->period_contrib;
 }
 
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
 static inline void cfs_se_util_change(struct sched_avg *avg)
 {
        unsigned int enqueued;
@@ -58,7 +49,7 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
        if (!sched_feat(UTIL_EST))
                return;
 
-       /* Avoid store if the flag has been already set */
+       /* Avoid store if the flag has been already reset */
        enqueued = avg->util_est.enqueued;
        if (!(enqueued & UTIL_AVG_UNCHANGED))
                return;
index 828b091..6784f27 100644 (file)
@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
+EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
 bool tick_nohz_full_running;
 EXPORT_SYMBOL_GPL(tick_nohz_full_running);
 static atomic_t tick_dep_mask;
index d2d7cf6..7a52bc1 100644 (file)
@@ -215,16 +215,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 static __always_inline int
 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
 {
-       int ret = security_locked_down(LOCKDOWN_BPF_READ);
+       int ret;
 
-       if (unlikely(ret < 0))
-               goto fail;
        ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
-               goto fail;
-       return ret;
-fail:
-       memset(dst, 0, size);
+               memset(dst, 0, size);
        return ret;
 }
 
@@ -246,10 +241,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 static __always_inline int
 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
 {
-       int ret = security_locked_down(LOCKDOWN_BPF_READ);
-
-       if (unlikely(ret < 0))
-               goto fail;
+       int ret;
 
        /*
         * The strncpy_from_kernel_nofault() call will likely not fill the
@@ -262,11 +254,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
         */
        ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
-               goto fail;
-
-       return ret;
-fail:
-       memset(dst, 0, size);
+               memset(dst, 0, size);
        return ret;
 }
 
@@ -1011,16 +999,20 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return &bpf_probe_read_kernel_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return &bpf_probe_read_kernel_str_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_str_proto;
 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        case BPF_FUNC_probe_read:
-               return &bpf_probe_read_compat_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_compat_proto;
        case BPF_FUNC_probe_read_str:
-               return &bpf_probe_read_compat_str_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_compat_str_proto;
 #endif
 #ifdef CONFIG_CGROUPS
        case BPF_FUNC_get_current_cgroup_id:
index 2e8a3fd..72ef4dc 100644 (file)
@@ -1967,12 +1967,18 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
 
 static void print_ip_ins(const char *fmt, const unsigned char *p)
 {
+       char ins[MCOUNT_INSN_SIZE];
        int i;
 
+       if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
+               printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
+               return;
+       }
+
        printk(KERN_CONT "%s", fmt);
 
        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-               printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+               printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
 }
 
 enum ftrace_bug_type ftrace_bug_type;
index a21ef9c..d23a09d 100644 (file)
@@ -2198,9 +2198,6 @@ struct saved_cmdlines_buffer {
 };
 static struct saved_cmdlines_buffer *savedcmd;
 
-/* temporary disable recording */
-static atomic_t trace_record_taskinfo_disabled __read_mostly;
-
 static inline char *get_saved_cmdlines(int idx)
 {
        return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
@@ -2486,8 +2483,6 @@ static bool tracing_record_taskinfo_skip(int flags)
 {
        if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
                return true;
-       if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
-               return true;
        if (!__this_cpu_read(trace_taskinfo_save))
                return true;
        return false;
@@ -2736,7 +2731,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
            (entry = this_cpu_read(trace_buffered_event))) {
                /* Try to use the per cpu buffer first */
                val = this_cpu_inc_return(trace_buffered_event_cnt);
-               if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
+               if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
                        trace_event_setup(entry, type, trace_ctx);
                        entry->array[0] = len;
                        return entry;
@@ -3998,9 +3993,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
                return ERR_PTR(-EBUSY);
 #endif
 
-       if (!iter->snapshot)
-               atomic_inc(&trace_record_taskinfo_disabled);
-
        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
@@ -4043,9 +4035,6 @@ static void s_stop(struct seq_file *m, void *p)
                return;
 #endif
 
-       if (!iter->snapshot)
-               atomic_dec(&trace_record_taskinfo_disabled);
-
        trace_access_unlock(iter->cpu_file);
        trace_event_read_unlock();
 }
index c1637f9..4702efb 100644 (file)
@@ -115,9 +115,9 @@ u64 notrace trace_clock_global(void)
        prev_time = READ_ONCE(trace_clock_struct.prev_time);
        now = sched_clock_cpu(this_cpu);
 
-       /* Make sure that now is always greater than prev_time */
+       /* Make sure that now is always greater than or equal to prev_time */
        if ((s64)(now - prev_time) < 0)
-               now = prev_time + 1;
+               now = prev_time;
 
        /*
         * If in an NMI context then dont risk lockups and simply return
@@ -131,7 +131,7 @@ u64 notrace trace_clock_global(void)
                /* Reread prev_time in case it was already updated */
                prev_time = READ_ONCE(trace_clock_struct.prev_time);
                if ((s64)(now - prev_time) < 0)
-                       now = prev_time + 1;
+                       now = prev_time;
 
                trace_clock_struct.prev_time = now;
 
index 47cfa05..9f852a8 100644 (file)
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL v2");
 /**
  * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
  * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
-       or the previous crc64 value if computing incrementally.
      or the previous crc64 value if computing incrementally.
  * @p: pointer to buffer over which CRC64 is run
  * @len: length of buffer @p
  */
index 05efe98..297d1b3 100644 (file)
@@ -192,7 +192,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
 
        pr_debug("Validating PMD advanced\n");
        /* Align the address wrt HPAGE_PMD_SIZE */
-       vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
+       vaddr &= HPAGE_PMD_MASK;
 
        pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 
@@ -330,7 +330,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
 
        pr_debug("Validating PUD advanced\n");
        /* Align the address wrt HPAGE_PUD_SIZE */
-       vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
+       vaddr &= HPAGE_PUD_MASK;
 
        set_pud_at(mm, vaddr, pudp, pud);
        pudp_set_wrprotect(mm, vaddr, pudp);
index 63ed6b2..6d2a011 100644 (file)
@@ -62,6 +62,7 @@ static struct shrinker deferred_split_shrinker;
 
 static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
+unsigned long huge_zero_pfn __read_mostly = ~0UL;
 
 bool transparent_hugepage_enabled(struct vm_area_struct *vma)
 {
@@ -98,6 +99,7 @@ retry:
                __free_pages(zero_page, compound_order(zero_page));
                goto retry;
        }
+       WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
 
        /* We take additional reference here. It will be put back by shrinker */
        atomic_set(&huge_zero_refcount, 2);
@@ -147,6 +149,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
                struct page *zero_page = xchg(&huge_zero_page, NULL);
                BUG_ON(zero_page == NULL);
+               WRITE_ONCE(huge_zero_pfn, ~0UL);
                __free_pages(zero_page, compound_order(zero_page));
                return HPAGE_PMD_NR;
        }
@@ -2044,7 +2047,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        count_vm_event(THP_SPLIT_PMD);
 
        if (!vma_is_anonymous(vma)) {
-               _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+               old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
                /*
                 * We are going to unmap this huge page. So
                 * just go ahead and zap it
@@ -2053,16 +2056,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        zap_deposited_table(mm, pmd);
                if (vma_is_special_huge(vma))
                        return;
-               page = pmd_page(_pmd);
-               if (!PageDirty(page) && pmd_dirty(_pmd))
-                       set_page_dirty(page);
-               if (!PageReferenced(page) && pmd_young(_pmd))
-                       SetPageReferenced(page);
-               page_remove_rmap(page, true);
-               put_page(page);
+               if (unlikely(is_pmd_migration_entry(old_pmd))) {
+                       swp_entry_t entry;
+
+                       entry = pmd_to_swp_entry(old_pmd);
+                       page = migration_entry_to_page(entry);
+               } else {
+                       page = pmd_page(old_pmd);
+                       if (!PageDirty(page) && pmd_dirty(old_pmd))
+                               set_page_dirty(page);
+                       if (!PageReferenced(page) && pmd_young(old_pmd))
+                               SetPageReferenced(page);
+                       page_remove_rmap(page, true);
+                       put_page(page);
+               }
                add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
                return;
-       } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+       }
+
+       if (is_huge_zero_pmd(*pmd)) {
                /*
                 * FIXME: Do we want to invalidate secondary mmu by calling
                 * mmu_notifier_invalidate_range() see comments below inside
@@ -2338,17 +2350,17 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 
 static void unmap_page(struct page *page)
 {
-       enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
+       enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
                TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
-       bool unmap_success;
 
        VM_BUG_ON_PAGE(!PageHead(page), page);
 
        if (PageAnon(page))
                ttu_flags |= TTU_SPLIT_FREEZE;
 
-       unmap_success = try_to_unmap(page, ttu_flags);
-       VM_BUG_ON_PAGE(!unmap_success, page);
+       try_to_unmap(page, ttu_flags);
+
+       VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
 }
 
 static void remap_page(struct page *page, unsigned int nr)
@@ -2659,7 +2671,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        struct deferred_split *ds_queue = get_deferred_split_queue(head);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
-       int count, mapcount, extra_pins, ret;
+       int extra_pins, ret;
        pgoff_t end;
 
        VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
@@ -2718,7 +2730,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        }
 
        unmap_page(head);
-       VM_BUG_ON_PAGE(compound_mapcount(head), head);
 
        /* block interrupt reentry in xa_lock and spinlock */
        local_irq_disable();
@@ -2736,9 +2747,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
        /* Prevent deferred_split_scan() touching ->_refcount */
        spin_lock(&ds_queue->split_queue_lock);
-       count = page_count(head);
-       mapcount = total_mapcount(head);
-       if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
+       if (page_ref_freeze(head, 1 + extra_pins)) {
                if (!list_empty(page_deferred_list(head))) {
                        ds_queue->split_queue_len--;
                        list_del(page_deferred_list(head));
@@ -2758,16 +2767,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                __split_huge_page(page, list, end);
                ret = 0;
        } else {
-               if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
-                       pr_alert("total_mapcount: %u, page_count(): %u\n",
-                                       mapcount, count);
-                       if (PageTail(page))
-                               dump_page(head, NULL);
-                       dump_page(page, "total_mapcount(head) > 0");
-                       BUG();
-               }
                spin_unlock(&ds_queue->split_queue_lock);
-fail:          if (mapping)
+fail:
+               if (mapping)
                        xa_unlock(&mapping->i_pages);
                local_irq_enable();
                remap_page(head, thp_nr_pages(head));
index 95918f4..e0a5f9c 100644 (file)
@@ -1793,7 +1793,7 @@ retry:
                        SetPageHWPoison(page);
                        ClearPageHWPoison(head);
                }
-               remove_hugetlb_page(h, page, false);
+               remove_hugetlb_page(h, head, false);
                h->max_huge_pages--;
                spin_unlock_irq(&hugetlb_lock);
                update_and_free_page(h, head);
@@ -2121,12 +2121,18 @@ out:
  * be restored when a newly allocated huge page must be freed.  It is
  * to be called after calling vma_needs_reservation to determine if a
  * reservation exists.
+ *
+ * vma_del_reservation is used in error paths where an entry in the reserve
+ * map was created during huge page allocation and must be removed.  It is to
+ * be called after calling vma_needs_reservation to determine if a reservation
+ * exists.
  */
 enum vma_resv_mode {
        VMA_NEEDS_RESV,
        VMA_COMMIT_RESV,
        VMA_END_RESV,
        VMA_ADD_RESV,
+       VMA_DEL_RESV,
 };
 static long __vma_reservation_common(struct hstate *h,
                                struct vm_area_struct *vma, unsigned long addr,
@@ -2170,11 +2176,21 @@ static long __vma_reservation_common(struct hstate *h,
                        ret = region_del(resv, idx, idx + 1);
                }
                break;
+       case VMA_DEL_RESV:
+               if (vma->vm_flags & VM_MAYSHARE) {
+                       region_abort(resv, idx, idx + 1, 1);
+                       ret = region_del(resv, idx, idx + 1);
+               } else {
+                       ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
+                       /* region_add calls of range 1 should never fail. */
+                       VM_BUG_ON(ret < 0);
+               }
+               break;
        default:
                BUG();
        }
 
-       if (vma->vm_flags & VM_MAYSHARE)
+       if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
                return ret;
        /*
         * We know private mapping must have HPAGE_RESV_OWNER set.
@@ -2222,25 +2238,39 @@ static long vma_add_reservation(struct hstate *h,
        return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
 }
 
+static long vma_del_reservation(struct hstate *h,
+                       struct vm_area_struct *vma, unsigned long addr)
+{
+       return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
+}
+
 /*
- * This routine is called to restore a reservation on error paths.  In the
- * specific error paths, a huge page was allocated (via alloc_huge_page)
- * and is about to be freed.  If a reservation for the page existed,
- * alloc_huge_page would have consumed the reservation and set
- * HPageRestoreReserve in the newly allocated page.  When the page is freed
- * via free_huge_page, the global reservation count will be incremented if
- * HPageRestoreReserve is set.  However, free_huge_page can not adjust the
- * reserve map.  Adjust the reserve map here to be consistent with global
- * reserve count adjustments to be made by free_huge_page.
+ * This routine is called to restore reservation information on error paths.
+ * It should ONLY be called for pages allocated via alloc_huge_page(), and
+ * the hugetlb mutex should remain held when calling this routine.
+ *
+ * It handles two specific cases:
+ * 1) A reservation was in place and the page consumed the reservation.
+ *    HPageRestoreReserve is set in the page.
+ * 2) No reservation was in place for the page, so HPageRestoreReserve is
+ *    not set.  However, alloc_huge_page always updates the reserve map.
+ *
+ * In case 1, free_huge_page later in the error path will increment the
+ * global reserve count.  But, free_huge_page does not have enough context
+ * to adjust the reservation map.  This case deals primarily with private
+ * mappings.  Adjust the reserve map here to be consistent with global
+ * reserve count adjustments to be made by free_huge_page.  Make sure the
+ * reserve map indicates there is a reservation present.
+ *
+ * In case 2, simply undo reserve map modifications done by alloc_huge_page.
  */
-static void restore_reserve_on_error(struct hstate *h,
-                       struct vm_area_struct *vma, unsigned long address,
-                       struct page *page)
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+                       unsigned long address, struct page *page)
 {
-       if (unlikely(HPageRestoreReserve(page))) {
-               long rc = vma_needs_reservation(h, vma, address);
+       long rc = vma_needs_reservation(h, vma, address);
 
-               if (unlikely(rc < 0)) {
+       if (HPageRestoreReserve(page)) {
+               if (unlikely(rc < 0))
                        /*
                         * Rare out of memory condition in reserve map
                         * manipulation.  Clear HPageRestoreReserve so that
@@ -2253,16 +2283,57 @@ static void restore_reserve_on_error(struct hstate *h,
                         * accounting of reserve counts.
                         */
                        ClearHPageRestoreReserve(page);
-               } else if (rc) {
-                       rc = vma_add_reservation(h, vma, address);
-                       if (unlikely(rc < 0))
+               else if (rc)
+                       (void)vma_add_reservation(h, vma, address);
+               else
+                       vma_end_reservation(h, vma, address);
+       } else {
+               if (!rc) {
+                       /*
+                        * This indicates there is an entry in the reserve map
+                        * added by alloc_huge_page.  We know it was added
+                        * before the alloc_huge_page call, otherwise
+                        * HPageRestoreReserve would be set on the page.
+                        * Remove the entry so that a subsequent allocation
+                        * does not consume a reservation.
+                        */
+                       rc = vma_del_reservation(h, vma, address);
+                       if (rc < 0)
                                /*
-                                * See above comment about rare out of
-                                * memory condition.
+                                * VERY rare out of memory condition.  Since
+                                * we can not delete the entry, set
+                                * HPageRestoreReserve so that the reserve
+                                * count will be incremented when the page
+                                * is freed.  This reserve will be consumed
+                                * on a subsequent allocation.
                                 */
-                               ClearHPageRestoreReserve(page);
+                               SetHPageRestoreReserve(page);
+               } else if (rc < 0) {
+                       /*
+                        * Rare out of memory condition from
+                        * vma_needs_reservation call.  Memory allocation is
+                        * only attempted if a new entry is needed.  Therefore,
+                        * this implies there is not an entry in the
+                        * reserve map.
+                        *
+                        * For shared mappings, no entry in the map indicates
+                        * no reservation.  We are done.
+                        */
+                       if (!(vma->vm_flags & VM_MAYSHARE))
+                               /*
+                                * For private mappings, no entry indicates
+                                * a reservation is present.  Since we can
+                                * not add an entry, set HPageRestoreReserve
+                                * on the page so reserve count will be
+                                * incremented when freed.  This reserve will
+                                * be consumed on a subsequent allocation.
+                                */
+                               SetHPageRestoreReserve(page);
                } else
-                       vma_end_reservation(h, vma, address);
+                       /*
+                        * No reservation present, do nothing
+                        */
+                        vma_end_reservation(h, vma, address);
        }
 }
 
@@ -4037,6 +4108,8 @@ again:
                                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
                                entry = huge_ptep_get(src_pte);
                                if (!pte_same(src_pte_old, entry)) {
+                                       restore_reserve_on_error(h, vma, addr,
+                                                               new);
                                        put_page(new);
                                        /* dst_entry won't change as in child */
                                        goto again;
@@ -4889,10 +4962,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                if (!page)
                        goto out;
        } else if (!*pagep) {
-               ret = -ENOMEM;
+               /* If a page already exists, then it's UFFDIO_COPY for
+                * a non-missing case. Return -EEXIST.
+                */
+               if (vm_shared &&
+                   hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+                       ret = -EEXIST;
+                       goto out;
+               }
+
                page = alloc_huge_page(dst_vma, dst_addr, 0);
-               if (IS_ERR(page))
+               if (IS_ERR(page)) {
+                       ret = -ENOMEM;
                        goto out;
+               }
 
                ret = copy_huge_page_from_user(page,
                                                (const void __user *) src_addr,
@@ -4996,6 +5079,7 @@ out_release_unlock:
        if (vm_shared || is_continue)
                unlock_page(page);
 out_release_nounlock:
+       restore_reserve_on_error(h, dst_vma, dst_addr, page);
        put_page(page);
        goto out;
 }
@@ -5847,6 +5931,21 @@ unlock:
        return ret;
 }
 
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+       int ret = 0;
+
+       *hugetlb = false;
+       spin_lock_irq(&hugetlb_lock);
+       if (PageHeadHuge(page)) {
+               *hugetlb = true;
+               if (HPageFreed(page) || HPageMigratable(page))
+                       ret = get_page_unless_zero(page);
+       }
+       spin_unlock_irq(&hugetlb_lock);
+       return ret;
+}
+
 void putback_active_hugepage(struct page *page)
 {
        spin_lock_irq(&hugetlb_lock);
index 2f11829..e8fdb53 100644 (file)
@@ -384,27 +384,52 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /*
- * At what user virtual address is page expected in @vma?
+ * At what user virtual address is page expected in vma?
+ * Returns -EFAULT if all of the page is outside the range of vma.
+ * If page is a compound head, the entire compound page is considered.
  */
 static inline unsigned long
-__vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address(struct page *page, struct vm_area_struct *vma)
 {
-       pgoff_t pgoff = page_to_pgoff(page);
-       return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+       pgoff_t pgoff;
+       unsigned long address;
+
+       VM_BUG_ON_PAGE(PageKsm(page), page);    /* KSM page->index unusable */
+       pgoff = page_to_pgoff(page);
+       if (pgoff >= vma->vm_pgoff) {
+               address = vma->vm_start +
+                       ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+               /* Check for address beyond vma (or wrapped through 0?) */
+               if (address < vma->vm_start || address >= vma->vm_end)
+                       address = -EFAULT;
+       } else if (PageHead(page) &&
+                  pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
+               /* Test above avoids possibility of wrap to 0 on 32-bit */
+               address = vma->vm_start;
+       } else {
+               address = -EFAULT;
+       }
+       return address;
 }
 
+/*
+ * Then at what user virtual address will none of the page be found in vma?
+ * Assumes that vma_address() already returned a good starting address.
+ * If page is a compound head, the entire compound page is considered.
+ */
 static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address_end(struct page *page, struct vm_area_struct *vma)
 {
-       unsigned long start, end;
-
-       start = __vma_address(page, vma);
-       end = start + thp_size(page) - PAGE_SIZE;
-
-       /* page should be within @vma mapping range */
-       VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
-
-       return max(start, vma->vm_start);
+       pgoff_t pgoff;
+       unsigned long address;
+
+       VM_BUG_ON_PAGE(PageKsm(page), page);    /* KSM page->index unusable */
+       pgoff = page_to_pgoff(page) + compound_nr(page);
+       address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+       /* Check for address beyond vma (or wrapped through 0?) */
+       if (address < vma->vm_start || address > vma->vm_end)
+               address = vma->vm_end;
+       return address;
 }
 
 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
index c4605ac..348f31d 100644 (file)
@@ -220,8 +220,8 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
 /**
  * kasan_populate_early_shadow - populate shadow memory region with
  *                               kasan_early_shadow_page
- * @shadow_start - start of the memory range to populate
- * @shadow_end   - end of the memory range to populate
+ * @shadow_start: start of the memory range to populate
+ * @shadow_end: end of the memory range to populate
  */
 int __ref kasan_populate_early_shadow(const void *shadow_start,
                                        const void *shadow_end)
index e18fbbd..4d21ac4 100644 (file)
@@ -627,10 +627,10 @@ static void toggle_allocation_gate(struct work_struct *work)
                 * During low activity with no allocations we might wait a
                 * while; let's avoid the hung task warning.
                 */
-               wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
-                                  sysctl_hung_task_timeout_secs * HZ / 2);
+               wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
+                                       sysctl_hung_task_timeout_secs * HZ / 2);
        } else {
-               wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
+               wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
        }
 
        /* Disable static key and reset timer. */
index 85ad98c..0143d32 100644 (file)
@@ -949,6 +949,17 @@ static int page_action(struct page_state *ps, struct page *p,
        return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
 }
 
+/*
+ * Return true if a page type of a given page is supported by hwpoison
+ * mechanism (while handling could fail), otherwise false.  This function
+ * does not return true for hugetlb or device memory pages, so it's assumed
+ * to be called only in the context where we never have such pages.
+ */
+static inline bool HWPoisonHandlable(struct page *page)
+{
+       return PageLRU(page) || __PageMovable(page);
+}
+
 /**
  * __get_hwpoison_page() - Get refcount for memory error handling:
  * @page:      raw error page (hit by memory error)
@@ -959,8 +970,22 @@ static int page_action(struct page_state *ps, struct page *p,
 static int __get_hwpoison_page(struct page *page)
 {
        struct page *head = compound_head(page);
+       int ret = 0;
+       bool hugetlb = false;
+
+       ret = get_hwpoison_huge_page(head, &hugetlb);
+       if (hugetlb)
+               return ret;
 
-       if (!PageHuge(head) && PageTransHuge(head)) {
+       /*
+        * This check prevents from calling get_hwpoison_unless_zero()
+        * for any unsupported type of page in order to reduce the risk of
+        * unexpected races caused by taking a page refcount.
+        */
+       if (!HWPoisonHandlable(head))
+               return 0;
+
+       if (PageTransHuge(head)) {
                /*
                 * Non anonymous thp exists only in allocation/free time. We
                 * can't handle such a case correctly, so let's give it up.
@@ -1017,7 +1042,7 @@ try_again:
                        ret = -EIO;
                }
        } else {
-               if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
+               if (PageHuge(p) || HWPoisonHandlable(p)) {
                        ret = 1;
                } else {
                        /*
@@ -1527,7 +1552,12 @@ try_again:
                return 0;
        }
 
-       if (!PageTransTail(p) && !PageLRU(p))
+       /*
+        * __munlock_pagevec may clear a writeback page's LRU flag without
+        * page_lock. We need to wait for writeback completion for this page,
+        * or it may trigger a VFS BUG while evicting the inode.
+        */
+       if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
                goto identify_page_state;
 
        /*
index 730daa0..486f4a2 100644 (file)
@@ -1361,7 +1361,18 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                        else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
                        /* fall through */
+               } else if (details && details->single_page &&
+                          PageTransCompound(details->single_page) &&
+                          next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
+                       spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
+                       /*
+                        * Take and drop THP pmd lock so that we cannot return
+                        * prematurely, while zap_huge_pmd() has cleared *pmd,
+                        * but not yet decremented compound_mapcount().
+                        */
+                       spin_unlock(ptl);
                }
+
                /*
                 * Here there can be other concurrent MADV_DONTNEED or
                 * trans huge page faults running, and if the pmd is
@@ -2939,6 +2950,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                }
                flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
+               entry = pte_sw_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
                /*
@@ -3235,6 +3247,36 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
        }
 }
 
+/**
+ * unmap_mapping_page() - Unmap single page from processes.
+ * @page: The locked page to be unmapped.
+ *
+ * Unmap this page from any userspace process which still has it mmaped.
+ * Typically, for efficiency, the range of nearby pages has already been
+ * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
+ * truncation or invalidation holds the lock on a page, it may find that
+ * the page has been remapped again: and then uses unmap_mapping_page()
+ * to unmap it finally.
+ */
+void unmap_mapping_page(struct page *page)
+{
+       struct address_space *mapping = page->mapping;
+       struct zap_details details = { };
+
+       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON(PageTail(page));
+
+       details.check_mapping = mapping;
+       details.first_index = page->index;
+       details.last_index = page->index + thp_nr_pages(page) - 1;
+       details.single_page = page;
+
+       i_mmap_lock_write(mapping);
+       if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+               unmap_mapping_range_tree(&mapping->i_mmap, &details);
+       i_mmap_unlock_write(mapping);
+}
+
 /**
  * unmap_mapping_pages() - Unmap pages from processes.
  * @mapping: The address space containing pages to be unmapped.
@@ -3602,6 +3644,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        __SetPageUptodate(page);
 
        entry = mk_pte(page, vma->vm_page_prot);
+       entry = pte_sw_mkyoung(entry);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3786,6 +3829,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 
        if (prefault && arch_wants_old_prefaulted_pte())
                entry = pte_mkold(entry);
+       else
+               entry = pte_sw_mkyoung(entry);
 
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
index b234c3f..41ff2c9 100644 (file)
@@ -295,6 +295,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                goto out;
 
        page = migration_entry_to_page(entry);
+       page = compound_head(page);
 
        /*
         * Once page cache replacement of page migration started, page_count
index aaa1655..d1f5de1 100644 (file)
@@ -9158,6 +9158,8 @@ bool take_page_off_buddy(struct page *page)
                        del_page_from_free_list(page_head, zone, page_order);
                        break_down_buddy_pages(zone, page_head, page, 0,
                                                page_order, migratetype);
+                       if (!is_migrate_isolate(migratetype))
+                               __mod_zone_freepage_state(zone, -1, migratetype);
                        ret = true;
                        break;
                }
index 2cf01d9..e37bd43 100644 (file)
@@ -212,23 +212,34 @@ restart:
                        pvmw->ptl = NULL;
                }
        } else if (!pmd_present(pmde)) {
+               /*
+                * If PVMW_SYNC, take and drop THP pmd lock so that we
+                * cannot return prematurely, while zap_huge_pmd() has
+                * cleared *pmd but not decremented compound_mapcount().
+                */
+               if ((pvmw->flags & PVMW_SYNC) &&
+                   PageTransCompound(pvmw->page)) {
+                       spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+
+                       spin_unlock(ptl);
+               }
                return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
+               unsigned long end;
+
                if (check_pte(pvmw))
                        return true;
 next_pte:
                /* Seek to next pte only makes sense for THP */
                if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
                        return not_found(pvmw);
+               end = vma_address_end(pvmw->page, pvmw->vma);
                do {
                        pvmw->address += PAGE_SIZE;
-                       if (pvmw->address >= pvmw->vma->vm_end ||
-                           pvmw->address >=
-                                       __vma_address(pvmw->page, pvmw->vma) +
-                                       thp_size(pvmw->page))
+                       if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
@@ -266,14 +277,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
                .vma = vma,
                .flags = PVMW_SYNC,
        };
-       unsigned long start, end;
-
-       start = __vma_address(page, vma);
-       end = start + thp_size(page) - PAGE_SIZE;
 
-       if (unlikely(end < vma->vm_start || start >= vma->vm_end))
+       pvmw.address = vma_address(page, vma);
+       if (pvmw.address == -EFAULT)
                return 0;
-       pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
index c2210e1..4e640ba 100644 (file)
@@ -135,9 +135,8 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 {
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-       VM_BUG_ON(!pmd_present(*pmdp));
-       /* Below assumes pmd_present() is true */
-       VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+       VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
+                          !pmd_devmap(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
index 693a610..e05c300 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -707,7 +707,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
-       unsigned long address;
        if (PageAnon(page)) {
                struct anon_vma *page__anon_vma = page_anon_vma(page);
                /*
@@ -717,15 +716,13 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
                if (!vma->anon_vma || !page__anon_vma ||
                    vma->anon_vma->root != page__anon_vma->root)
                        return -EFAULT;
-       } else if (page->mapping) {
-               if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
-                       return -EFAULT;
-       } else
+       } else if (!vma->vm_file) {
                return -EFAULT;
-       address = __vma_address(page, vma);
-       if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+       } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
                return -EFAULT;
-       return address;
+       }
+
+       return vma_address(page, vma);
 }
 
 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
@@ -919,7 +916,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
         */
        mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
                                0, vma, vma->vm_mm, address,
-                               min(vma->vm_end, address + page_size(page)));
+                               vma_address_end(page, vma));
        mmu_notifier_invalidate_range_start(&range);
 
        while (page_vma_mapped_walk(&pvmw)) {
@@ -1405,6 +1402,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        struct mmu_notifier_range range;
        enum ttu_flags flags = (enum ttu_flags)(long)arg;
 
+       /*
+        * When racing against e.g. zap_pte_range() on another cpu,
+        * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+        * try_to_unmap() may return false when it is about to become true,
+        * if page table locking is skipped: use TTU_SYNC to wait for that.
+        */
+       if (flags & TTU_SYNC)
+               pvmw.flags = PVMW_SYNC;
+
        /* munlock has nothing to gain from examining un-locked vmas */
        if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
                return true;
@@ -1426,9 +1432,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         * Note that the page can not be free in this function as call of
         * try_to_unmap() must hold a reference on the page.
         */
+       range.end = PageKsm(page) ?
+                       address + PAGE_SIZE : vma_address_end(page, vma);
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
-                               address,
-                               min(vma->vm_end, address + page_size(page)));
+                               address, range.end);
        if (PageHuge(page)) {
                /*
                 * If sharing is possible, start and end will be adjusted
@@ -1777,7 +1784,13 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
        else
                rmap_walk(page, &rwc);
 
-       return !page_mapcount(page) ? true : false;
+       /*
+        * When racing against e.g. zap_pte_range() on another cpu,
+        * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+        * try_to_unmap() may return false when it is about to become true,
+        * if page table locking is skipped: use TTU_SYNC to wait for that.
+        */
+       return !page_mapcount(page);
 }
 
 /**
@@ -1874,6 +1887,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
 
+               VM_BUG_ON_VMA(address == -EFAULT, vma);
                cond_resched();
 
                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
@@ -1928,6 +1942,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
                        pgoff_start, pgoff_end) {
                unsigned long address = vma_address(page, vma);
 
+               VM_BUG_ON_VMA(address == -EFAULT, vma);
                cond_resched();
 
                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
index a4a5714..7cab776 100644 (file)
@@ -97,8 +97,7 @@ EXPORT_SYMBOL(kmem_cache_size);
 #ifdef CONFIG_DEBUG_VM
 static int kmem_cache_sanity_check(const char *name, unsigned int size)
 {
-       if (!name || in_interrupt() || size < sizeof(void *) ||
-               size > KMALLOC_MAX_SIZE) {
+       if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
                pr_err("kmem_cache_create(%s) integrity check failed\n", name);
                return -EINVAL;
        }
index 3f96e09..61bd40e 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/bit_spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/swab.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include "slab.h"
@@ -712,15 +713,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
               p, p - addr, get_freepointer(s, p));
 
        if (s->flags & SLAB_RED_ZONE)
-               print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+               print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
                              s->red_left_pad);
        else if (p > addr + 16)
                print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-       print_section(KERN_ERR, "Object ", p,
+       print_section(KERN_ERR,         "Object   ", p,
                      min_t(unsigned int, s->object_size, PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
-               print_section(KERN_ERR, "Redzone ", p + s->object_size,
+               print_section(KERN_ERR, "Redzone  ", p + s->object_size,
                        s->inuse - s->object_size);
 
        off = get_info_end(s);
@@ -732,7 +733,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
        if (off != size_from_object(s))
                /* Beginning of the filler is the free pointer */
-               print_section(KERN_ERR, "Padding ", p + off,
+               print_section(KERN_ERR, "Padding  ", p + off,
                              size_from_object(s) - off);
 
        dump_stack();
@@ -909,11 +910,11 @@ static int check_object(struct kmem_cache *s, struct page *page,
        u8 *endobject = object + s->object_size;
 
        if (s->flags & SLAB_RED_ZONE) {
-               if (!check_bytes_and_report(s, page, object, "Redzone",
+               if (!check_bytes_and_report(s, page, object, "Left Redzone",
                        object - s->red_left_pad, val, s->red_left_pad))
                        return 0;
 
-               if (!check_bytes_and_report(s, page, object, "Redzone",
+               if (!check_bytes_and_report(s, page, object, "Right Redzone",
                        endobject, val, s->inuse - s->object_size))
                        return 0;
        } else {
@@ -928,7 +929,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
                if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
                        (!check_bytes_and_report(s, page, p, "Poison", p,
                                        POISON_FREE, s->object_size - 1) ||
-                        !check_bytes_and_report(s, page, p, "Poison",
+                        !check_bytes_and_report(s, page, p, "End Poison",
                                p + s->object_size - 1, POISON_END, 1)))
                        return 0;
                /*
@@ -3689,7 +3690,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
        slab_flags_t flags = s->flags;
        unsigned int size = s->object_size;
-       unsigned int freepointer_area;
        unsigned int order;
 
        /*
@@ -3698,13 +3698,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
         * the possible location of the free pointer.
         */
        size = ALIGN(size, sizeof(void *));
-       /*
-        * This is the area of the object where a freepointer can be
-        * safely written. If redzoning adds more to the inuse size, we
-        * can't use that portion for writing the freepointer, so
-        * s->offset must be limited within this for the general case.
-        */
-       freepointer_area = size;
 
 #ifdef CONFIG_SLUB_DEBUG
        /*
@@ -3730,19 +3723,21 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
        /*
         * With that we have determined the number of bytes in actual use
-        * by the object. This is the potential offset to the free pointer.
+        * by the object and redzoning.
         */
        s->inuse = size;
 
-       if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
-               s->ctor)) {
+       if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+           ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
+           s->ctor) {
                /*
                 * Relocate free pointer after the object if it is not
                 * permitted to overwrite the first word of the object on
                 * kmem_cache_free.
                 *
                 * This is the case if we do RCU, have a constructor or
-                * destructor or are poisoning the objects.
+                * destructor, are poisoning the objects, or are
+                * redzoning an object smaller than sizeof(void *).
                 *
                 * The assumption that s->offset >= s->inuse means free
                 * pointer is outside of the object is used in the
@@ -3751,13 +3746,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
                 */
                s->offset = size;
                size += sizeof(void *);
-       } else if (freepointer_area > sizeof(void *)) {
+       } else {
                /*
                 * Store freelist pointer near middle of object to keep
                 * it away from the edges of the object to avoid small
                 * sized over/underflows from neighboring allocations.
                 */
-               s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
+               s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
        }
 
 #ifdef CONFIG_SLUB_DEBUG
index b2ada9d..55c18af 100644 (file)
@@ -344,6 +344,15 @@ size_t mem_section_usage_size(void)
        return sizeof(struct mem_section_usage) + usemap_size();
 }
 
+static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
+{
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+       return __pa_symbol(pgdat);
+#else
+       return __pa(pgdat);
+#endif
+}
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static struct mem_section_usage * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
@@ -362,7 +371,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
-       goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+       goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
 again:
@@ -390,7 +399,7 @@ static void __init check_usemap_section_nr(int nid,
        }
 
        usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
-       pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
+       pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;
 
index 149e774..996afa8 100644 (file)
@@ -1900,7 +1900,7 @@ unsigned int count_swap_pages(int type, int free)
 
 static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
 {
-       return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
+       return pte_same(pte_swp_clear_flags(pte), swp_pte);
 }
 
 /*
index 95af244..234ddd8 100644 (file)
@@ -167,13 +167,10 @@ void do_invalidatepage(struct page *page, unsigned int offset,
  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
-static void
-truncate_cleanup_page(struct address_space *mapping, struct page *page)
+static void truncate_cleanup_page(struct page *page)
 {
-       if (page_mapped(page)) {
-               unsigned int nr = thp_nr_pages(page);
-               unmap_mapping_pages(mapping, page->index, nr, false);
-       }
+       if (page_mapped(page))
+               unmap_mapping_page(page);
 
        if (page_has_private(page))
                do_invalidatepage(page, 0, thp_size(page));
@@ -218,7 +215,7 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
        if (page->mapping != mapping)
                return -EIO;
 
-       truncate_cleanup_page(mapping, page);
+       truncate_cleanup_page(page);
        delete_from_page_cache(page);
        return 0;
 }
@@ -325,7 +322,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                index = indices[pagevec_count(&pvec) - 1] + 1;
                truncate_exceptional_pvec_entries(mapping, &pvec, indices);
                for (i = 0; i < pagevec_count(&pvec); i++)
-                       truncate_cleanup_page(mapping, pvec.pages[i]);
+                       truncate_cleanup_page(pvec.pages[i]);
                delete_from_page_cache_batch(mapping, &pvec);
                for (i = 0; i < pagevec_count(&pvec); i++)
                        unlock_page(pvec.pages[i]);
@@ -639,6 +636,16 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                continue;
                        }
 
+                       if (!did_range_unmap && page_mapped(page)) {
+                               /*
+                                * If page is mapped, before taking its lock,
+                                * zap the rest of the file in one hit.
+                                */
+                               unmap_mapping_pages(mapping, index,
+                                               (1 + end - index), false);
+                               did_range_unmap = 1;
+                       }
+
                        lock_page(page);
                        WARN_ON(page_to_index(page) != index);
                        if (page->mapping != mapping) {
@@ -646,23 +653,11 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                continue;
                        }
                        wait_on_page_writeback(page);
-                       if (page_mapped(page)) {
-                               if (!did_range_unmap) {
-                                       /*
-                                        * Zap the rest of the file in one hit.
-                                        */
-                                       unmap_mapping_pages(mapping, index,
-                                               (1 + end - index), false);
-                                       did_range_unmap = 1;
-                               } else {
-                                       /*
-                                        * Just zap this page
-                                        */
-                                       unmap_mapping_pages(mapping, index,
-                                                               1, false);
-                               }
-                       }
+
+                       if (page_mapped(page))
+                               unmap_mapping_page(page);
                        BUG_ON(page_mapped(page));
+
                        ret2 = do_launder_page(mapping, page);
                        if (ret2 == 0) {
                                if (!invalidate_complete_page2(mapping, page))
index be18af4..c7236da 100644 (file)
@@ -768,7 +768,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
        if (a && a->status & ATIF_PROBE) {
                a->status |= ATIF_PROBE_FAIL;
                /*
-                * we do not respond to probe or request packets for
+                * we do not respond to probe or request packets of
                 * this address while we are probing this address
                 */
                goto unlock;
index 789f257..fc8be49 100644 (file)
@@ -409,8 +409,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
        if (WARN_ON(!forw_packet->if_outgoing))
                return;
 
-       if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
+       if (forw_packet->if_outgoing->soft_iface != soft_iface) {
+               pr_warn("%s: soft interface switch for queued OGM\n", __func__);
                return;
+       }
 
        if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
                return;
index fd12f16..7d71d10 100644 (file)
@@ -1610,8 +1610,13 @@ setup_failed:
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
-               flush_work(&hdev->cmd_work);
+
+               /* Since hci_rx_work() is possible to awake new cmd_work
+                * it should be flushed first to avoid unexpected call of
+                * hci_cmd_work()
+                */
                flush_work(&hdev->rx_work);
+               flush_work(&hdev->cmd_work);
 
                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);
index 251b912..eed0dd0 100644 (file)
@@ -762,7 +762,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
-                       bh_lock_sock_nested(sk);
+                       lock_sock(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
@@ -771,7 +771,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
 
                                hci_dev_put(hdev);
                        }
-                       bh_unlock_sock(sk);
+                       release_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
index 372e3b2..7dd51da 100644 (file)
@@ -3229,7 +3229,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
 {
        struct l2cap_chan *chan;
 
-       bt_dev_dbg(pchan->conn->hcon->hdev, "pchan %p", pchan);
+       BT_DBG("pchan %p", pchan);
 
        chan = l2cap_chan_create();
        if (!chan)
@@ -3250,7 +3250,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
         */
        atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
 
-       bt_dev_dbg(pchan->conn->hcon->hdev, "created chan %p", chan);
+       BT_DBG("created chan %p", chan);
 
        return chan;
 }
@@ -3354,7 +3354,7 @@ static void smp_del_chan(struct l2cap_chan *chan)
 {
        struct smp_dev *smp;
 
-       bt_dev_dbg(chan->conn->hcon->hdev, "chan %p", chan);
+       BT_DBG("chan %p", chan);
 
        smp = chan->data;
        if (smp) {
index 7ce8a77..e013d33 100644 (file)
@@ -90,8 +90,8 @@ struct bridge_mcast_stats {
 #endif
 
 struct br_tunnel_info {
-       __be64                  tunnel_id;
-       struct metadata_dst     *tunnel_dst;
+       __be64                          tunnel_id;
+       struct metadata_dst __rcu       *tunnel_dst;
 };
 
 /* private vlan flags */
index 0d3a8c0..0101744 100644 (file)
@@ -41,26 +41,33 @@ static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
                                      br_vlan_tunnel_rht_params);
 }
 
+static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
+{
+       struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
+
+       WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
+       RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
+       dst_release(&tdst->dst);
+}
+
 void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
                          struct net_bridge_vlan *vlan)
 {
-       if (!vlan->tinfo.tunnel_dst)
+       if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
                return;
        rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
                               br_vlan_tunnel_rht_params);
-       vlan->tinfo.tunnel_id = 0;
-       dst_release(&vlan->tinfo.tunnel_dst->dst);
-       vlan->tinfo.tunnel_dst = NULL;
+       vlan_tunnel_info_release(vlan);
 }
 
 static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
                                  struct net_bridge_vlan *vlan, u32 tun_id)
 {
-       struct metadata_dst *metadata = NULL;
+       struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
        __be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
        int err;
 
-       if (vlan->tinfo.tunnel_dst)
+       if (metadata)
                return -EEXIST;
 
        metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
@@ -69,8 +76,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
                return -EINVAL;
 
        metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
-       vlan->tinfo.tunnel_dst = metadata;
-       vlan->tinfo.tunnel_id = key;
+       rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
+       WRITE_ONCE(vlan->tinfo.tunnel_id, key);
 
        err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
                                            br_vlan_tunnel_rht_params);
@@ -79,9 +86,7 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
 
        return 0;
 out:
-       dst_release(&vlan->tinfo.tunnel_dst->dst);
-       vlan->tinfo.tunnel_dst = NULL;
-       vlan->tinfo.tunnel_id = 0;
+       vlan_tunnel_info_release(vlan);
 
        return err;
 }
@@ -182,12 +187,15 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
 int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
                                 struct net_bridge_vlan *vlan)
 {
+       struct metadata_dst *tunnel_dst;
+       __be64 tunnel_id;
        int err;
 
-       if (!vlan || !vlan->tinfo.tunnel_id)
+       if (!vlan)
                return 0;
 
-       if (unlikely(!skb_vlan_tag_present(skb)))
+       tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
+       if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
                return 0;
 
        skb_dst_drop(skb);
@@ -195,7 +203,9 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
        if (err)
                return err;
 
-       skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
+       tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
+       if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
+               skb_dst_set(skb, &tunnel_dst->dst);
 
        return 0;
 }
index c10e5a5..4401397 100644 (file)
@@ -308,7 +308,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
        caifd_put(caifd);
 }
 
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                     struct cflayer *link_support, int head_room,
                     struct cflayer **layer,
                     int (**rcv_func)(struct sk_buff *, struct net_device *,
@@ -319,11 +319,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        enum cfcnfg_phy_preference pref;
        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
        struct caif_device_entry_list *caifdevs;
+       int res;
 
        caifdevs = caif_device_list(dev_net(dev));
        caifd = caif_device_alloc(dev);
        if (!caifd)
-               return;
+               return -ENOMEM;
        *layer = &caifd->layer;
        spin_lock_init(&caifd->flow_lock);
 
@@ -344,7 +345,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        strlcpy(caifd->layer.name, dev->name,
                sizeof(caifd->layer.name));
        caifd->layer.transmit = transmit;
-       cfcnfg_add_phy_layer(cfg,
+       res = cfcnfg_add_phy_layer(cfg,
                                dev,
                                &caifd->layer,
                                pref,
@@ -354,6 +355,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        mutex_unlock(&caifdevs->lock);
        if (rcv_func)
                *rcv_func = receive;
+       return res;
 }
 EXPORT_SYMBOL(caif_enroll_dev);
 
@@ -368,6 +370,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
        struct cflayer *layer, *link_support;
        int head_room = 0;
        struct caif_device_entry_list *caifdevs;
+       int res;
 
        cfg = get_cfcnfg(dev_net(dev));
        caifdevs = caif_device_list(dev_net(dev));
@@ -393,8 +396,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
                                break;
                        }
                }
-               caif_enroll_dev(dev, caifdev, link_support, head_room,
+               res = caif_enroll_dev(dev, caifdev, link_support, head_room,
                                &layer, NULL);
+               if (res)
+                       cfserl_release(link_support);
                caifdev->flowctrl = dev_flowctrl;
                break;
 
index a0116b9..b02e129 100644 (file)
@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
        return (struct cflayer *) this;
 }
 
+static void cfusbl_release(struct cflayer *layer)
+{
+       kfree(layer);
+}
+
 static struct packet_type caif_usb_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_802_EX1),
 };
@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
        struct cflayer *layer, *link_support;
        struct usbnet *usbnet;
        struct usb_device *usbdev;
+       int res;
 
        /* Check whether we have a NCM device, and find its VID/PID. */
        if (!(dev->dev.parent && dev->dev.parent->driver &&
@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
        if (dev->num_tx_queues > 1)
                pr_warn("USB device uses more than one tx queue\n");
 
-       caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+       res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
                        &layer, &caif_usb_type.func);
+       if (res)
+               goto err;
+
        if (!pack_added)
                dev_add_pack(&caif_usb_type);
        pack_added = true;
@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
        strlcpy(layer->name, dev->name, sizeof(layer->name));
 
        return 0;
+err:
+       cfusbl_release(link_support);
+       return res;
 }
 
 static struct notifier_block caif_device_notifier = {
index 399239a..cac30e6 100644 (file)
@@ -450,7 +450,7 @@ unlock:
        rcu_read_unlock();
 }
 
-void
+int
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                     struct net_device *dev, struct cflayer *phy_layer,
                     enum cfcnfg_phy_preference pref,
@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
 {
        struct cflayer *frml;
        struct cfcnfg_phyinfo *phyinfo = NULL;
-       int i;
+       int i, res = 0;
        u8 phyid;
 
        mutex_lock(&cnfg->lock);
@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                        goto got_phyid;
        }
        pr_warn("Too many CAIF Link Layers (max 6)\n");
+       res = -EEXIST;
        goto out;
 
 got_phyid:
        phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
-       if (!phyinfo)
+       if (!phyinfo) {
+               res = -ENOMEM;
                goto out_err;
+       }
 
        phy_layer->id = phyid;
        phyinfo->pref = pref;
@@ -492,8 +495,10 @@ got_phyid:
 
        frml = cffrml_create(phyid, fcs);
 
-       if (!frml)
+       if (!frml) {
+               res = -ENOMEM;
                goto out_err;
+       }
        phyinfo->frm_layer = frml;
        layer_set_up(frml, cnfg->mux);
 
@@ -511,11 +516,12 @@ got_phyid:
        list_add_rcu(&phyinfo->node, &cnfg->phys);
 out:
        mutex_unlock(&cnfg->lock);
-       return;
+       return res;
 
 out_err:
        kfree(phyinfo);
        mutex_unlock(&cnfg->lock);
+       return res;
 }
 EXPORT_SYMBOL(cfcnfg_add_phy_layer);
 
index e11725a..40cd57a 100644 (file)
@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
                           int phyid);
 
+void cfserl_release(struct cflayer *layer)
+{
+       kfree(layer);
+}
+
 struct cflayer *cfserl_create(int instance, bool use_stx)
 {
        struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
index 909b9e6..f3e4d95 100644 (file)
@@ -125,7 +125,7 @@ struct bcm_sock {
        struct sock sk;
        int bound;
        int ifindex;
-       struct notifier_block notifier;
+       struct list_head notifier;
        struct list_head rx_ops;
        struct list_head tx_ops;
        unsigned long dropped_usr_msgs;
@@ -133,6 +133,10 @@ struct bcm_sock {
        char procname [32]; /* inode number in decimal with \0 */
 };
 
+static LIST_HEAD(bcm_notifier_list);
+static DEFINE_SPINLOCK(bcm_notifier_lock);
+static struct bcm_sock *bcm_busy_notifier;
+
 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
 {
        return (struct bcm_sock *)sk;
@@ -402,6 +406,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
                if (!op->count && (op->flags & TX_COUNTEVT)) {
 
                        /* create notification to user */
+                       memset(&msg_head, 0, sizeof(msg_head));
                        msg_head.opcode  = TX_EXPIRED;
                        msg_head.flags   = op->flags;
                        msg_head.count   = op->count;
@@ -439,6 +444,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
        /* this element is not throttled anymore */
        data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
 
+       memset(&head, 0, sizeof(head));
        head.opcode  = RX_CHANGED;
        head.flags   = op->flags;
        head.count   = op->count;
@@ -560,6 +566,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
        }
 
        /* create notification to user */
+       memset(&msg_head, 0, sizeof(msg_head));
        msg_head.opcode  = RX_TIMEOUT;
        msg_head.flags   = op->flags;
        msg_head.count   = op->count;
@@ -1378,20 +1385,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 /*
  * notification handler for netdevice status changes
  */
-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
-                       void *ptr)
+static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
+                      struct net_device *dev)
 {
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
        struct sock *sk = &bo->sk;
        struct bcm_op *op;
        int notify_enodev = 0;
 
        if (!net_eq(dev_net(dev), sock_net(sk)))
-               return NOTIFY_DONE;
-
-       if (dev->type != ARPHRD_CAN)
-               return NOTIFY_DONE;
+               return;
 
        switch (msg) {
 
@@ -1426,7 +1428,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
                                sk->sk_error_report(sk);
                }
        }
+}
 
+static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
+                       void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       if (dev->type != ARPHRD_CAN)
+               return NOTIFY_DONE;
+       if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+               return NOTIFY_DONE;
+       if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
+               return NOTIFY_DONE;
+
+       spin_lock(&bcm_notifier_lock);
+       list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
+               spin_unlock(&bcm_notifier_lock);
+               bcm_notify(bcm_busy_notifier, msg, dev);
+               spin_lock(&bcm_notifier_lock);
+       }
+       bcm_busy_notifier = NULL;
+       spin_unlock(&bcm_notifier_lock);
        return NOTIFY_DONE;
 }
 
@@ -1446,9 +1469,9 @@ static int bcm_init(struct sock *sk)
        INIT_LIST_HEAD(&bo->rx_ops);
 
        /* set notifier */
-       bo->notifier.notifier_call = bcm_notifier;
-
-       register_netdevice_notifier(&bo->notifier);
+       spin_lock(&bcm_notifier_lock);
+       list_add_tail(&bo->notifier, &bcm_notifier_list);
+       spin_unlock(&bcm_notifier_lock);
 
        return 0;
 }
@@ -1471,7 +1494,14 @@ static int bcm_release(struct socket *sock)
 
        /* remove bcm_ops, timer, rx_unregister(), etc. */
 
-       unregister_netdevice_notifier(&bo->notifier);
+       spin_lock(&bcm_notifier_lock);
+       while (bcm_busy_notifier == bo) {
+               spin_unlock(&bcm_notifier_lock);
+               schedule_timeout_uninterruptible(1);
+               spin_lock(&bcm_notifier_lock);
+       }
+       list_del(&bo->notifier);
+       spin_unlock(&bcm_notifier_lock);
 
        lock_sock(sk);
 
@@ -1692,6 +1722,10 @@ static struct pernet_operations canbcm_pernet_ops __read_mostly = {
        .exit = canbcm_pernet_exit,
 };
 
+static struct notifier_block canbcm_notifier = {
+       .notifier_call = bcm_notifier
+};
+
 static int __init bcm_module_init(void)
 {
        int err;
@@ -1705,12 +1739,14 @@ static int __init bcm_module_init(void)
        }
 
        register_pernet_subsys(&canbcm_pernet_ops);
+       register_netdevice_notifier(&canbcm_notifier);
        return 0;
 }
 
 static void __exit bcm_module_exit(void)
 {
        can_proto_unregister(&bcm_can_proto);
+       unregister_netdevice_notifier(&canbcm_notifier);
        unregister_pernet_subsys(&canbcm_pernet_ops);
 }
 
index 253b244..be6183f 100644 (file)
@@ -143,10 +143,14 @@ struct isotp_sock {
        u32 force_tx_stmin;
        u32 force_rx_stmin;
        struct tpcon rx, tx;
-       struct notifier_block notifier;
+       struct list_head notifier;
        wait_queue_head_t wait;
 };
 
+static LIST_HEAD(isotp_notifier_list);
+static DEFINE_SPINLOCK(isotp_notifier_lock);
+static struct isotp_sock *isotp_busy_notifier;
+
 static inline struct isotp_sock *isotp_sk(const struct sock *sk)
 {
        return (struct isotp_sock *)sk;
@@ -1013,7 +1017,14 @@ static int isotp_release(struct socket *sock)
        /* wait for complete transmission of current pdu */
        wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
 
-       unregister_netdevice_notifier(&so->notifier);
+       spin_lock(&isotp_notifier_lock);
+       while (isotp_busy_notifier == so) {
+               spin_unlock(&isotp_notifier_lock);
+               schedule_timeout_uninterruptible(1);
+               spin_lock(&isotp_notifier_lock);
+       }
+       list_del(&so->notifier);
+       spin_unlock(&isotp_notifier_lock);
 
        lock_sock(sk);
 
@@ -1317,21 +1328,16 @@ static int isotp_getsockopt(struct socket *sock, int level, int optname,
        return 0;
 }
 
-static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
-                         void *ptr)
+static void isotp_notify(struct isotp_sock *so, unsigned long msg,
+                        struct net_device *dev)
 {
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
        struct sock *sk = &so->sk;
 
        if (!net_eq(dev_net(dev), sock_net(sk)))
-               return NOTIFY_DONE;
-
-       if (dev->type != ARPHRD_CAN)
-               return NOTIFY_DONE;
+               return;
 
        if (so->ifindex != dev->ifindex)
-               return NOTIFY_DONE;
+               return;
 
        switch (msg) {
        case NETDEV_UNREGISTER:
@@ -1357,7 +1363,28 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
                        sk->sk_error_report(sk);
                break;
        }
+}
 
+static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+                         void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       if (dev->type != ARPHRD_CAN)
+               return NOTIFY_DONE;
+       if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+               return NOTIFY_DONE;
+       if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
+               return NOTIFY_DONE;
+
+       spin_lock(&isotp_notifier_lock);
+       list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
+               spin_unlock(&isotp_notifier_lock);
+               isotp_notify(isotp_busy_notifier, msg, dev);
+               spin_lock(&isotp_notifier_lock);
+       }
+       isotp_busy_notifier = NULL;
+       spin_unlock(&isotp_notifier_lock);
        return NOTIFY_DONE;
 }
 
@@ -1394,8 +1421,9 @@ static int isotp_init(struct sock *sk)
 
        init_waitqueue_head(&so->wait);
 
-       so->notifier.notifier_call = isotp_notifier;
-       register_netdevice_notifier(&so->notifier);
+       spin_lock(&isotp_notifier_lock);
+       list_add_tail(&so->notifier, &isotp_notifier_list);
+       spin_unlock(&isotp_notifier_lock);
 
        return 0;
 }
@@ -1442,6 +1470,10 @@ static const struct can_proto isotp_can_proto = {
        .prot = &isotp_proto,
 };
 
+static struct notifier_block canisotp_notifier = {
+       .notifier_call = isotp_notifier
+};
+
 static __init int isotp_module_init(void)
 {
        int err;
@@ -1451,6 +1483,8 @@ static __init int isotp_module_init(void)
        err = can_proto_register(&isotp_can_proto);
        if (err < 0)
                pr_err("can: registration of isotp protocol failed\n");
+       else
+               register_netdevice_notifier(&canisotp_notifier);
 
        return err;
 }
@@ -1458,6 +1492,7 @@ static __init int isotp_module_init(void)
 static __exit void isotp_module_exit(void)
 {
        can_proto_unregister(&isotp_can_proto);
+       unregister_netdevice_notifier(&canisotp_notifier);
 }
 
 module_init(isotp_module_init);
index e09d087..c3946c3 100644 (file)
@@ -330,6 +330,9 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
 
        if ((do_skcb->offset + do_skb->len) < offset_start) {
                __skb_unlink(do_skb, &session->skb_queue);
+               /* drop ref taken in j1939_session_skb_queue() */
+               skb_unref(do_skb);
+
                kfree_skb(do_skb);
        }
        spin_unlock_irqrestore(&session->skb_queue.lock, flags);
@@ -349,12 +352,13 @@ void j1939_session_skb_queue(struct j1939_session *session,
 
        skcb->flags |= J1939_ECU_LOCAL_SRC;
 
+       skb_get(skb);
        skb_queue_tail(&session->skb_queue, skb);
 }
 
 static struct
-sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
-                                         unsigned int offset_start)
+sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
+                                        unsigned int offset_start)
 {
        struct j1939_priv *priv = session->priv;
        struct j1939_sk_buff_cb *do_skcb;
@@ -371,6 +375,10 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
                        skb = do_skb;
                }
        }
+
+       if (skb)
+               skb_get(skb);
+
        spin_unlock_irqrestore(&session->skb_queue.lock, flags);
 
        if (!skb)
@@ -381,12 +389,12 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
        return skb;
 }
 
-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
 {
        unsigned int offset_start;
 
        offset_start = session->pkt.dpo * 7;
-       return j1939_session_skb_find_by_offset(session, offset_start);
+       return j1939_session_skb_get_by_offset(session, offset_start);
 }
 
 /* see if we are receiver
@@ -776,7 +784,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
        int ret = 0;
        u8 dat[8];
 
-       se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
+       se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
        if (!se_skb)
                return -ENOBUFS;
 
@@ -801,7 +809,8 @@ static int j1939_session_tx_dat(struct j1939_session *session)
                        netdev_err_once(priv->ndev,
                                        "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
                                        __func__, session, skcb->offset, se_skb->len , session->pkt.tx);
-                       return -EOVERFLOW;
+                       ret = -EOVERFLOW;
+                       goto out_free;
                }
 
                if (!len) {
@@ -835,6 +844,12 @@ static int j1939_session_tx_dat(struct j1939_session *session)
        if (pkt_done)
                j1939_tp_set_rxtimeout(session, 250);
 
+ out_free:
+       if (ret)
+               kfree_skb(se_skb);
+       else
+               consume_skb(se_skb);
+
        return ret;
 }
 
@@ -1007,7 +1022,7 @@ static int j1939_xtp_txnext_receiver(struct j1939_session *session)
 static int j1939_simple_txnext(struct j1939_session *session)
 {
        struct j1939_priv *priv = session->priv;
-       struct sk_buff *se_skb = j1939_session_skb_find(session);
+       struct sk_buff *se_skb = j1939_session_skb_get(session);
        struct sk_buff *skb;
        int ret;
 
@@ -1015,8 +1030,10 @@ static int j1939_simple_txnext(struct j1939_session *session)
                return 0;
 
        skb = skb_clone(se_skb, GFP_ATOMIC);
-       if (!skb)
-               return -ENOMEM;
+       if (!skb) {
+               ret = -ENOMEM;
+               goto out_free;
+       }
 
        can_skb_set_owner(skb, se_skb->sk);
 
@@ -1024,12 +1041,18 @@ static int j1939_simple_txnext(struct j1939_session *session)
 
        ret = j1939_send_one(priv, skb);
        if (ret)
-               return ret;
+               goto out_free;
 
        j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
        j1939_sk_queue_activate_next(session);
 
-       return 0;
+ out_free:
+       if (ret)
+               kfree_skb(se_skb);
+       else
+               consume_skb(se_skb);
+
+       return ret;
 }
 
 static bool j1939_session_deactivate_locked(struct j1939_session *session)
@@ -1170,9 +1193,10 @@ static void j1939_session_completed(struct j1939_session *session)
        struct sk_buff *skb;
 
        if (!session->transmission) {
-               skb = j1939_session_skb_find(session);
+               skb = j1939_session_skb_get(session);
                /* distribute among j1939 receivers */
                j1939_sk_recv(session->priv, skb);
+               consume_skb(skb);
        }
 
        j1939_session_deactivate_activate_next(session);
@@ -1744,7 +1768,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
 {
        struct j1939_priv *priv = session->priv;
        struct j1939_sk_buff_cb *skcb;
-       struct sk_buff *se_skb;
+       struct sk_buff *se_skb = NULL;
        const u8 *dat;
        u8 *tpdat;
        int offset;
@@ -1786,7 +1810,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
                goto out_session_cancel;
        }
 
-       se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
+       se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
        if (!se_skb) {
                netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
                            session);
@@ -1848,11 +1872,13 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
                j1939_tp_set_rxtimeout(session, 250);
        }
        session->last_cmd = 0xff;
+       consume_skb(se_skb);
        j1939_session_put(session);
 
        return;
 
  out_session_cancel:
+       kfree_skb(se_skb);
        j1939_session_timers_cancel(session);
        j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
        j1939_session_put(session);
index 139d947..ac96fc2 100644 (file)
@@ -83,7 +83,7 @@ struct raw_sock {
        struct sock sk;
        int bound;
        int ifindex;
-       struct notifier_block notifier;
+       struct list_head notifier;
        int loopback;
        int recv_own_msgs;
        int fd_frames;
@@ -95,6 +95,10 @@ struct raw_sock {
        struct uniqframe __percpu *uniq;
 };
 
+static LIST_HEAD(raw_notifier_list);
+static DEFINE_SPINLOCK(raw_notifier_lock);
+static struct raw_sock *raw_busy_notifier;
+
 /* Return pointer to store the extra msg flags for raw_recvmsg().
  * We use the space of one unsigned int beyond the 'struct sockaddr_can'
  * in skb->cb.
@@ -263,21 +267,16 @@ static int raw_enable_allfilters(struct net *net, struct net_device *dev,
        return err;
 }
 
-static int raw_notifier(struct notifier_block *nb,
-                       unsigned long msg, void *ptr)
+static void raw_notify(struct raw_sock *ro, unsigned long msg,
+                      struct net_device *dev)
 {
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
        struct sock *sk = &ro->sk;
 
        if (!net_eq(dev_net(dev), sock_net(sk)))
-               return NOTIFY_DONE;
-
-       if (dev->type != ARPHRD_CAN)
-               return NOTIFY_DONE;
+               return;
 
        if (ro->ifindex != dev->ifindex)
-               return NOTIFY_DONE;
+               return;
 
        switch (msg) {
        case NETDEV_UNREGISTER:
@@ -305,7 +304,28 @@ static int raw_notifier(struct notifier_block *nb,
                        sk->sk_error_report(sk);
                break;
        }
+}
+
+static int raw_notifier(struct notifier_block *nb, unsigned long msg,
+                       void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       if (dev->type != ARPHRD_CAN)
+               return NOTIFY_DONE;
+       if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+               return NOTIFY_DONE;
+       if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
+               return NOTIFY_DONE;
 
+       spin_lock(&raw_notifier_lock);
+       list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
+               spin_unlock(&raw_notifier_lock);
+               raw_notify(raw_busy_notifier, msg, dev);
+               spin_lock(&raw_notifier_lock);
+       }
+       raw_busy_notifier = NULL;
+       spin_unlock(&raw_notifier_lock);
        return NOTIFY_DONE;
 }
 
@@ -334,9 +354,9 @@ static int raw_init(struct sock *sk)
                return -ENOMEM;
 
        /* set notifier */
-       ro->notifier.notifier_call = raw_notifier;
-
-       register_netdevice_notifier(&ro->notifier);
+       spin_lock(&raw_notifier_lock);
+       list_add_tail(&ro->notifier, &raw_notifier_list);
+       spin_unlock(&raw_notifier_lock);
 
        return 0;
 }
@@ -351,7 +371,14 @@ static int raw_release(struct socket *sock)
 
        ro = raw_sk(sk);
 
-       unregister_netdevice_notifier(&ro->notifier);
+       spin_lock(&raw_notifier_lock);
+       while (raw_busy_notifier == ro) {
+               spin_unlock(&raw_notifier_lock);
+               schedule_timeout_uninterruptible(1);
+               spin_lock(&raw_notifier_lock);
+       }
+       list_del(&ro->notifier);
+       spin_unlock(&raw_notifier_lock);
 
        lock_sock(sk);
 
@@ -889,6 +916,10 @@ static const struct can_proto raw_can_proto = {
        .prot       = &raw_proto,
 };
 
+static struct notifier_block canraw_notifier = {
+       .notifier_call = raw_notifier
+};
+
 static __init int raw_module_init(void)
 {
        int err;
@@ -898,6 +929,8 @@ static __init int raw_module_init(void)
        err = can_proto_register(&raw_can_proto);
        if (err < 0)
                pr_err("can: registration of raw protocol failed\n");
+       else
+               register_netdevice_notifier(&canraw_notifier);
 
        return err;
 }
@@ -905,6 +938,7 @@ static __init int raw_module_init(void)
 static __exit void raw_module_exit(void)
 {
        can_proto_unregister(&raw_can_proto);
+       unregister_netdevice_notifier(&canraw_notifier);
 }
 
 module_init(raw_module_init);
index ddd15af..210fc3b 100644 (file)
@@ -177,7 +177,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
        if (kcmlen > stackbuf_size)
                kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
        if (kcmsg == NULL)
-               return -ENOBUFS;
+               return -ENOMEM;
 
        /* Now copy them over neatly. */
        memset(kcmsg, 0, kcmlen);
index 4eb9695..051432e 100644 (file)
@@ -705,7 +705,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
        case DEVLINK_PORT_FLAVOUR_PHYSICAL:
        case DEVLINK_PORT_FLAVOUR_CPU:
        case DEVLINK_PORT_FLAVOUR_DSA:
-       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
                if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
                                attrs->phys.port_number))
                        return -EMSGSIZE;
@@ -8631,7 +8630,6 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
 
        switch (attrs->flavour) {
        case DEVLINK_PORT_FLAVOUR_PHYSICAL:
-       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
                if (!attrs->split)
                        n = snprintf(name, len, "p%u", attrs->phys.port_number);
                else
@@ -8679,6 +8677,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
                n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
                             attrs->pci_sf.sf);
                break;
+       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+               return -EOPNOTSUPP;
        }
 
        if (n >= len)
index cd80ffe..a9f9379 100644 (file)
@@ -1168,7 +1168,7 @@ static void notify_rule_change(int event, struct fib_rule *rule,
 {
        struct net *net;
        struct sk_buff *skb;
-       int err = -ENOBUFS;
+       int err = -ENOMEM;
 
        net = ops->fro_net;
        skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
index 98f20ef..bf77457 100644 (file)
@@ -238,6 +238,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 
                        write_lock(&n->lock);
                        if ((n->nud_state == NUD_FAILED) ||
+                           (n->nud_state == NUD_NOARP) ||
                            (tbl->is_multicast &&
                             tbl->is_multicast(n->primary_key)) ||
                            time_after(tref, n->updated))
index 43b6ac4..9b5a767 100644 (file)
@@ -641,6 +641,18 @@ void __put_net(struct net *net)
 }
 EXPORT_SYMBOL_GPL(__put_net);
 
+/**
+ * get_net_ns - increment the refcount of the network namespace
+ * @ns: common namespace (net)
+ *
+ * Returns the net's common namespace.
+ */
+struct ns_common *get_net_ns(struct ns_common *ns)
+{
+       return &get_net(container_of(ns, struct net, ns))->ns;
+}
+EXPORT_SYMBOL_GPL(get_net_ns);
+
 struct net *get_net_ns_by_fd(int fd)
 {
        struct file *file;
@@ -660,14 +672,8 @@ struct net *get_net_ns_by_fd(int fd)
        fput(file);
        return net;
 }
-
-#else
-struct net *get_net_ns_by_fd(int fd)
-{
-       return ERR_PTR(-EINVAL);
-}
-#endif
 EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
+#endif
 
 struct net *get_net_ns_by_pid(pid_t pid)
 {
index 714d5fa..ec931b0 100644 (file)
@@ -4842,6 +4842,10 @@ static int rtnl_bridge_notify(struct net_device *dev)
        if (err < 0)
                goto errout;
 
+       /* Notification info is only filled for bridge ports, not the bridge
+        * device itself. Therefore, a zero notification length is valid and
+        * should not result in an error.
+        */
        if (!skb->len)
                goto errout;
 
index 3ad2287..bbc3b4b 100644 (file)
@@ -1253,6 +1253,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
        struct sock *sk = skb->sk;
        struct sk_buff_head *q;
        unsigned long flags;
+       bool is_zerocopy;
        u32 lo, hi;
        u16 len;
 
@@ -1267,6 +1268,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
        len = uarg->len;
        lo = uarg->id;
        hi = uarg->id + len - 1;
+       is_zerocopy = uarg->zerocopy;
 
        serr = SKB_EXT_ERR(skb);
        memset(serr, 0, sizeof(*serr));
@@ -1274,7 +1276,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
        serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
        serr->ee.ee_data = hi;
        serr->ee.ee_info = lo;
-       if (!uarg->zerocopy)
+       if (!is_zerocopy)
                serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
 
        q = &sk->sk_error_queue;
index 958614e..946888a 100644 (file)
@@ -815,10 +815,18 @@ void sock_set_rcvbuf(struct sock *sk, int val)
 }
 EXPORT_SYMBOL(sock_set_rcvbuf);
 
+static void __sock_set_mark(struct sock *sk, u32 val)
+{
+       if (val != sk->sk_mark) {
+               sk->sk_mark = val;
+               sk_dst_reset(sk);
+       }
+}
+
 void sock_set_mark(struct sock *sk, u32 val)
 {
        lock_sock(sk);
-       sk->sk_mark = val;
+       __sock_set_mark(sk, val);
        release_sock(sk);
 }
 EXPORT_SYMBOL(sock_set_mark);
@@ -1126,10 +1134,10 @@ set_sndbuf:
        case SO_MARK:
                if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
                        ret = -EPERM;
-               } else if (val != sk->sk_mark) {
-                       sk->sk_mark = val;
-                       sk_dst_reset(sk);
+                       break;
                }
+
+               __sock_set_mark(sk, val);
                break;
 
        case SO_RXQ_OVFL:
index 008c1ec..122ad58 100644 (file)
@@ -64,7 +64,7 @@
 #define DSA_8021Q_SUBVLAN_HI_SHIFT     9
 #define DSA_8021Q_SUBVLAN_HI_MASK      GENMASK(9, 9)
 #define DSA_8021Q_SUBVLAN_LO_SHIFT     4
-#define DSA_8021Q_SUBVLAN_LO_MASK      GENMASK(4, 3)
+#define DSA_8021Q_SUBVLAN_LO_MASK      GENMASK(5, 4)
 #define DSA_8021Q_SUBVLAN_HI(x)                (((x) & GENMASK(2, 2)) >> 2)
 #define DSA_8021Q_SUBVLAN_LO(x)                ((x) & GENMASK(1, 0))
 #define DSA_8021Q_SUBVLAN(x)           \
index 2a6733a..5d38e90 100644 (file)
@@ -95,7 +95,7 @@ static int get_module_eeprom_by_page(struct net_device *dev,
        if (dev->sfp_bus)
                return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack);
 
-       if (ops->get_module_info)
+       if (ops->get_module_eeprom_by_page)
                return ops->get_module_eeprom_by_page(dev, page_data, extack);
 
        return -EOPNOTSUPP;
index 3fa7a39..baa5d10 100644 (file)
@@ -1421,7 +1421,7 @@ static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
        if (eeprom.offset + eeprom.len > total_len)
                return -EINVAL;
 
-       data = kmalloc(PAGE_SIZE, GFP_USER);
+       data = kzalloc(PAGE_SIZE, GFP_USER);
        if (!data)
                return -ENOMEM;
 
@@ -1486,7 +1486,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
        if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
                return -EINVAL;
 
-       data = kmalloc(PAGE_SIZE, GFP_USER);
+       data = kzalloc(PAGE_SIZE, GFP_USER);
        if (!data)
                return -ENOMEM;
 
@@ -1765,7 +1765,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
                return -EFAULT;
 
        test.len = test_len;
-       data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
+       data = kcalloc(test_len, sizeof(u64), GFP_USER);
        if (!data)
                return -ENOMEM;
 
@@ -2293,7 +2293,7 @@ static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
        ret = ethtool_tunable_valid(&tuna);
        if (ret)
                return ret;
-       data = kmalloc(tuna.len, GFP_USER);
+       data = kzalloc(tuna.len, GFP_USER);
        if (!data)
                return -ENOMEM;
        ret = ops->get_tunable(dev, &tuna, data);
@@ -2485,7 +2485,7 @@ static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
        ret = ethtool_phy_tunable_valid(&tuna);
        if (ret)
                return ret;
-       data = kmalloc(tuna.len, GFP_USER);
+       data = kzalloc(tuna.len, GFP_USER);
        if (!data)
                return -ENOMEM;
        if (phy_drv_tunable) {
index b3029ff..2d51b7a 100644 (file)
@@ -353,6 +353,8 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
        int len = 0;
        int ret;
 
+       len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */
+
        for (i = 0; i < ETH_SS_COUNT; i++) {
                const struct strset_info *set_info = &data->sets[i];
 
index 0c1b077..29bf976 100644 (file)
@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
            nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
            nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
                        be32_to_cpu(params.frame_counter)) ||
-           ieee802154_llsec_fill_key_id(msg, &params.out_key))
+           ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
+               rc = -ENOBUFS;
                goto out_free;
+       }
 
        dev_put(dev);
 
@@ -1184,7 +1186,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
 {
        struct ieee802154_llsec_device *dpos;
        struct ieee802154_llsec_device_key *kpos;
-       int rc = 0, idx = 0, idx2;
+       int idx = 0, idx2;
 
        list_for_each_entry(dpos, &data->table->devices, list) {
                if (idx++ < data->s_idx)
@@ -1200,7 +1202,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
                                                      data->nlmsg_seq,
                                                      dpos->hwaddr, kpos,
                                                      data->dev)) {
-                               return rc = -EMSGSIZE;
+                               return -EMSGSIZE;
                        }
 
                        data->s_idx2++;
@@ -1209,7 +1211,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
                data->s_idx++;
        }
 
-       return rc;
+       return 0;
 }
 
 int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
index 2cdc7e6..88215b5 100644 (file)
@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
-           nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+           nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
+               rc = -EMSGSIZE;
                goto nla_put_failure;
+       }
        dev_put(dev);
 
        wpan_phy_put(phy);
index 05f6bd8..0cf2374 100644 (file)
@@ -1298,19 +1298,20 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
        if (!nla || nla_parse_nested_deprecated(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla, nl802154_dev_addr_policy, NULL))
                return -EINVAL;
 
-       if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
-           !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
-           !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
-             attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
+       if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || !attrs[NL802154_DEV_ADDR_ATTR_MODE])
                return -EINVAL;
 
        addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
        addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
        switch (addr->mode) {
        case NL802154_DEV_ADDR_SHORT:
+               if (!attrs[NL802154_DEV_ADDR_ATTR_SHORT])
+                       return -EINVAL;
                addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
                break;
        case NL802154_DEV_ADDR_EXTENDED:
+               if (!attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])
+                       return -EINVAL;
                addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
                break;
        default:
index f17870e..2f94d22 100644 (file)
@@ -575,7 +575,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
                        return err;
        }
 
-       if (!inet_sk(sk)->inet_num && inet_autobind(sk))
+       if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
                return -EAGAIN;
        return sk->sk_prot->connect(sk, uaddr, addr_len);
 }
@@ -803,7 +803,7 @@ int inet_send_prepare(struct sock *sk)
        sock_rps_record_flow(sk);
 
        /* We may need to bind the socket. */
-       if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
+       if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
            inet_autobind(sk))
                return -EAGAIN;
 
index bfaf327..e0480c6 100644 (file)
@@ -472,6 +472,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
                kfree(doi_def->map.std->lvl.local);
                kfree(doi_def->map.std->cat.cipso);
                kfree(doi_def->map.std->cat.local);
+               kfree(doi_def->map.std);
                break;
        }
        kfree(doi_def);
index 2e35f68..1c6429c 100644 (file)
@@ -1989,7 +1989,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
                return -EAFNOSUPPORT;
 
        if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
-               BUG();
+               return -EINVAL;
 
        if (tb[IFLA_INET_CONF]) {
                nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
index 7b6931a..752e392 100644 (file)
@@ -759,6 +759,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
                icmp_param.data_len = room;
        icmp_param.head_len = sizeof(struct icmphdr);
 
+       /* if we don't have a source address at this point, fall back to the
+        * dummy address instead of sending out a packet with a source address
+        * of 0.0.0.0
+        */
+       if (!fl4.saddr)
+               fl4.saddr = htonl(INADDR_DUMMY);
+
        icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
 ende:
        ip_rt_put(rt);
index 7b272bb..6b3c558 100644 (file)
@@ -1801,6 +1801,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
        while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
                in_dev->mc_list = i->next_rcu;
                in_dev->mc_count--;
+               ip_mc_clear_src(i);
                ip_ma_put(i);
        }
 }
index bc2f6ca..816d8aa 100644 (file)
@@ -886,7 +886,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
 
 
 /*
- *  Copy BOOTP-supplied string if not already set.
+ *  Copy BOOTP-supplied string
  */
 static int __init ic_bootp_string(char *dest, char *src, int len, int max)
 {
@@ -935,12 +935,15 @@ static void __init ic_do_bootp_ext(u8 *ext)
                }
                break;
        case 12:        /* Host name */
-               ic_bootp_string(utsname()->nodename, ext+1, *ext,
-                               __NEW_UTS_LEN);
-               ic_host_name_set = 1;
+               if (!ic_host_name_set) {
+                       ic_bootp_string(utsname()->nodename, ext+1, *ext,
+                                       __NEW_UTS_LEN);
+                       ic_host_name_set = 1;
+               }
                break;
        case 15:        /* Domain name (DNS) */
-               ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
+               if (!ic_domain[0])
+                       ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
                break;
        case 17:        /* Root path */
                if (!root_server_path[0])
index 1c9f71a..95a7183 100644 (file)
@@ -954,6 +954,7 @@ bool ping_rcv(struct sk_buff *skb)
        struct sock *sk;
        struct net *net = dev_net(skb->dev);
        struct icmphdr *icmph = icmp_hdr(skb);
+       bool rc = false;
 
        /* We assume the packet has already been checked by icmp_rcv */
 
@@ -968,14 +969,15 @@ bool ping_rcv(struct sk_buff *skb)
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
                pr_debug("rcv on socket %p\n", sk);
-               if (skb2)
-                       ping_queue_rcv_skb(sk, skb2);
+               if (skb2 && !ping_queue_rcv_skb(sk, skb2))
+                       rc = true;
                sock_put(sk);
-               return true;
        }
-       pr_debug("no socket, dropping\n");
 
-       return false;
+       if (!rc)
+               pr_debug("no socket, dropping\n");
+
+       return rc;
 }
 EXPORT_SYMBOL_GPL(ping_rcv);
 
index f6787c5..6a36ac9 100644 (file)
@@ -2056,6 +2056,19 @@ martian_source:
        return err;
 }
 
+/* get device for dst_alloc with local routes */
+static struct net_device *ip_rt_get_dev(struct net *net,
+                                       const struct fib_result *res)
+{
+       struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
+       struct net_device *dev = NULL;
+
+       if (nhc)
+               dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
+
+       return dev ? : net->loopback_dev;
+}
+
 /*
  *     NOTE. We drop all the packets that has local source
  *     addresses, because every properly looped back packet
@@ -2212,7 +2225,7 @@ local_input:
                }
        }
 
-       rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
+       rth = rt_dst_alloc(ip_rt_get_dev(net, res),
                           flags | RTCF_LOCAL, res->type,
                           IN_DEV_ORCONF(in_dev, NOPOLICY), false);
        if (!rth)
index 15f5504..1307ad0 100644 (file)
@@ -2607,6 +2607,9 @@ void udp_destroy_sock(struct sock *sk)
 {
        struct udp_sock *up = udp_sk(sk);
        bool slow = lock_sock_fast(sk);
+
+       /* protects from races with udp_abort() */
+       sock_set_flag(sk, SOCK_DEAD);
        udp_flush_pending_frames(sk);
        unlock_sock_fast(sk, slow);
        if (static_branch_unlikely(&udp_encap_needed_key)) {
@@ -2857,10 +2860,17 @@ int udp_abort(struct sock *sk, int err)
 {
        lock_sock(sk);
 
+       /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
+        * with close()
+        */
+       if (sock_flag(sk, SOCK_DEAD))
+               goto out;
+
        sk->sk_err = err;
        sk->sk_error_report(sk);
        __udp_disconnect(sk, 0);
 
+out:
        release_sock(sk);
 
        return 0;
index b0ef65e..701eb82 100644 (file)
@@ -5827,7 +5827,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
                return -EAFNOSUPPORT;
 
        if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
-               BUG();
+               return -EINVAL;
 
        if (tb[IFLA_INET6_TOKEN]) {
                err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
index e204163..92f3235 100644 (file)
@@ -135,6 +135,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
 }
 EXPORT_SYMBOL_GPL(nft_fib6_eval_type);
 
+static bool nft_fib_v6_skip_icmpv6(const struct sk_buff *skb, u8 next, const struct ipv6hdr *iph)
+{
+       if (likely(next != IPPROTO_ICMPV6))
+               return false;
+
+       if (ipv6_addr_type(&iph->saddr) != IPV6_ADDR_ANY)
+               return false;
+
+       return ipv6_addr_type(&iph->daddr) & IPV6_ADDR_LINKLOCAL;
+}
+
 void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
                   const struct nft_pktinfo *pkt)
 {
@@ -163,10 +174,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 
        lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
 
-       if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
-           nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
-               nft_fib_store_result(dest, priv, nft_in(pkt));
-               return;
+       if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
+           nft_hook(pkt) == NF_INET_INGRESS) {
+               if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
+                   nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
+                       nft_fib_store_result(dest, priv, nft_in(pkt));
+                       return;
+               }
        }
 
        *dest = 0;
index a22822b..d417e51 100644 (file)
@@ -3673,11 +3673,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
        if (nh) {
                if (rt->fib6_src.plen) {
                        NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
-                       goto out;
+                       goto out_free;
                }
                if (!nexthop_get(nh)) {
                        NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
-                       goto out;
+                       goto out_free;
                }
                rt->nh = nh;
                fib6_nh = nexthop_fib6_nh(rt->nh);
@@ -3714,6 +3714,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
 out:
        fib6_info_release(rt);
        return ERR_PTR(err);
+out_free:
+       ip_fib_metrics_put(rt->fib6_metrics);
+       kfree(rt);
+       return ERR_PTR(err);
 }
 
 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
index aa98294..f7c8110 100644 (file)
@@ -271,6 +271,9 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
        if (ipip6_tunnel_create(dev) < 0)
                goto failed_free;
 
+       if (!parms->name[0])
+               strcpy(parms->name, dev->name);
+
        return nt;
 
 failed_free:
index 199b080..3fcd86f 100644 (file)
@@ -1598,6 +1598,9 @@ void udpv6_destroy_sock(struct sock *sk)
 {
        struct udp_sock *up = udp_sk(sk);
        lock_sock(sk);
+
+       /* protects from races with udp_abort() */
+       sock_set_flag(sk, SOCK_DEAD);
        udp_v6_flush_pending_frames(sk);
        release_sock(sk);
 
index 9245c04..fc34ae2 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2021 Intel Corporation
  */
 
 #include <linux/debugfs.h>
@@ -387,10 +387,17 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf,
                           size_t count, loff_t *ppos)
 {
        struct ieee80211_local *local = file->private_data;
+       int ret;
 
        rtnl_lock();
+       wiphy_lock(local->hw.wiphy);
        __ieee80211_suspend(&local->hw, NULL);
-       __ieee80211_resume(&local->hw);
+       ret = __ieee80211_resume(&local->hw);
+       wiphy_unlock(local->hw.wiphy);
+
+       if (ret)
+               cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
        rtnl_unlock();
 
        return count;
index 214404a..648696b 100644 (file)
@@ -1442,7 +1442,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
 
-       if (WARN_ON_ONCE(!chanctx_conf)) {
+       if (!chanctx_conf) {
                rcu_read_unlock();
                return NULL;
        }
index 2e2f73a..137fa4c 100644 (file)
@@ -476,14 +476,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
                                   GFP_KERNEL);
        }
 
-       /* APs need special treatment */
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
-               struct ieee80211_sub_if_data *vlan, *tmpsdata;
-
-               /* down all dependent devices, that is VLANs */
-               list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
-                                        u.vlan.list)
-                       dev_close(vlan->dev);
                WARN_ON(!list_empty(&sdata->u.ap.vlans));
        } else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
                /* remove all packets in parent bc_buf pointing to this dev */
@@ -641,6 +634,15 @@ static int ieee80211_stop(struct net_device *dev)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
+       /* close all dependent VLAN interfaces before locking wiphy */
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               struct ieee80211_sub_if_data *vlan, *tmpsdata;
+
+               list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
+                                        u.vlan.list)
+                       dev_close(vlan->dev);
+       }
+
        wiphy_lock(sdata->local->hw.wiphy);
        ieee80211_do_stop(sdata, true);
        wiphy_unlock(sdata->local->hw.wiphy);
@@ -1591,6 +1593,9 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP:
+               if (!list_empty(&sdata->u.ap.vlans))
+                       return -EBUSY;
+               break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_ADHOC:
        case NL80211_IFTYPE_OCB:
index 62145e5..f33a3ac 100644 (file)
@@ -252,6 +252,7 @@ static void ieee80211_restart_work(struct work_struct *work)
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, restart_work);
        struct ieee80211_sub_if_data *sdata;
+       int ret;
 
        /* wait for scan work complete */
        flush_workqueue(local->workqueue);
@@ -301,8 +302,12 @@ static void ieee80211_restart_work(struct work_struct *work)
        /* wait for all packet processing to be done */
        synchronize_net();
 
-       ieee80211_reconfig(local);
+       ret = ieee80211_reconfig(local);
        wiphy_unlock(local->hw.wiphy);
+
+       if (ret)
+               cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
        rtnl_unlock();
 }
 
index 2480bd0..3f2aad2 100644 (file)
@@ -4062,10 +4062,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                if (elems.mbssid_config_ie)
                        bss_conf->profile_periodicity =
                                elems.mbssid_config_ie->profile_periodicity;
+               else
+                       bss_conf->profile_periodicity = 0;
 
                if (elems.ext_capab_len >= 11 &&
                    (elems.ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
                        bss_conf->ema_ap = true;
+               else
+                       bss_conf->ema_ap = false;
 
                /* continue assoc process */
                ifmgd->assoc_data->timeout = jiffies;
@@ -5802,12 +5806,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                                              beacon_ies->data, beacon_ies->len);
                if (elem && elem->datalen >= 3)
                        sdata->vif.bss_conf.profile_periodicity = elem->data[2];
+               else
+                       sdata->vif.bss_conf.profile_periodicity = 0;
 
                elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
                                          beacon_ies->data, beacon_ies->len);
                if (elem && elem->datalen >= 11 &&
                    (elem->data[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
                        sdata->vif.bss_conf.ema_ap = true;
+               else
+                       sdata->vif.bss_conf.ema_ap = false;
        } else {
                assoc_data->timeout = jiffies;
                assoc_data->timeout_started = true;
index 6487b05..a6f3fb4 100644 (file)
@@ -1514,7 +1514,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
            (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
                return;
 
-       if (time_is_before_jiffies(mi->sample_time))
+       if (time_is_after_jiffies(mi->sample_time))
                return;
 
        mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
index 1bb43ed..af0ef45 100644 (file)
@@ -2240,17 +2240,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        sc = le16_to_cpu(hdr->seq_ctrl);
        frag = sc & IEEE80211_SCTL_FRAG;
 
-       if (is_multicast_ether_addr(hdr->addr1)) {
-               I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
-               goto out_no_led;
-       }
-
        if (rx->sta)
                cache = &rx->sta->frags;
 
        if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
                goto out;
 
+       if (is_multicast_ether_addr(hdr->addr1))
+               return RX_DROP_MONITOR;
+
        I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
        if (skb_linearize(rx->skb))
@@ -2376,7 +2374,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 
  out:
        ieee80211_led_rx(rx->local);
- out_no_led:
        if (rx->sta)
                rx->sta->rx_stats.packets++;
        return RX_CONTINUE;
index d4cc9ac..6b50cb5 100644 (file)
@@ -251,13 +251,24 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
        struct ieee80211_mgmt *mgmt = (void *)skb->data;
        struct ieee80211_bss *bss;
        struct ieee80211_channel *channel;
+       size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
+                                     u.probe_resp.variable);
+
+       if (!ieee80211_is_probe_resp(mgmt->frame_control) &&
+           !ieee80211_is_beacon(mgmt->frame_control) &&
+           !ieee80211_is_s1g_beacon(mgmt->frame_control))
+               return;
 
        if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
-               if (skb->len < 15)
-                       return;
-       } else if (skb->len < 24 ||
-                (!ieee80211_is_probe_resp(mgmt->frame_control) &&
-                 !ieee80211_is_beacon(mgmt->frame_control)))
+               if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+                       min_hdr_len = offsetof(struct ieee80211_ext,
+                                              u.s1g_short_beacon.variable);
+               else
+                       min_hdr_len = offsetof(struct ieee80211_ext,
+                                              u.s1g_beacon);
+       }
+
+       if (skb->len < min_hdr_len)
                return;
 
        sdata1 = rcu_dereference(local->scan_sdata);
index 0b719f3..2651498 100644 (file)
@@ -2014,6 +2014,26 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
        ieee80211_tx(sdata, sta, skb, false);
 }
 
+static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
+{
+       struct ieee80211_radiotap_header *rthdr =
+               (struct ieee80211_radiotap_header *)skb->data;
+
+       /* check for not even having the fixed radiotap header part */
+       if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+               return false; /* too short to be possibly valid */
+
+       /* is it a header version we can trust to find length from? */
+       if (unlikely(rthdr->it_version))
+               return false; /* only version 0 is supported */
+
+       /* does the skb contain enough to deliver on the alleged length? */
+       if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
+               return false; /* skb too short for claimed rt header extent */
+
+       return true;
+}
+
 bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
                                 struct net_device *dev)
 {
@@ -2022,8 +2042,6 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
        struct ieee80211_radiotap_header *rthdr =
                (struct ieee80211_radiotap_header *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_supported_band *sband =
-               local->hw.wiphy->bands[info->band];
        int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
                                                   NULL);
        u16 txflags;
@@ -2036,17 +2054,8 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
        u8 vht_mcs = 0, vht_nss = 0;
        int i;
 
-       /* check for not even having the fixed radiotap header part */
-       if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
-               return false; /* too short to be possibly valid */
-
-       /* is it a header version we can trust to find length from? */
-       if (unlikely(rthdr->it_version))
-               return false; /* only version 0 is supported */
-
-       /* does the skb contain enough to deliver on the alleged length? */
-       if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
-               return false; /* skb too short for claimed rt header extent */
+       if (!ieee80211_validate_radiotap_len(skb))
+               return false;
 
        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
                       IEEE80211_TX_CTL_DONTFRAG;
@@ -2186,6 +2195,9 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
                return false;
 
        if (rate_found) {
+               struct ieee80211_supported_band *sband =
+                       local->hw.wiphy->bands[info->band];
+
                info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
 
                for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -2199,7 +2211,7 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
                } else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
                        ieee80211_rate_set_vht(info->control.rates, vht_mcs,
                                               vht_nss);
-               } else {
+               } else if (sband) {
                        for (i = 0; i < sband->n_bitrates; i++) {
                                if (rate * 5 != sband->bitrates[i].bitrate)
                                        continue;
@@ -2236,8 +2248,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
        info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
                      IEEE80211_TX_CTL_INJECTED;
 
-       /* Sanity-check and process the injection radiotap header */
-       if (!ieee80211_parse_tx_radiotap(skb, dev))
+       /* Sanity-check the length of the radiotap header */
+       if (!ieee80211_validate_radiotap_len(skb))
                goto fail;
 
        /* we now know there is a radiotap header with a length we can use */
@@ -2351,6 +2363,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
        ieee80211_select_queue_80211(sdata, skb, hdr);
        skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
 
+       /*
+        * Process the radiotap header. This will now take into account the
+        * selected chandef above to accurately set injection rates and
+        * retransmissions.
+        */
+       if (!ieee80211_parse_tx_radiotap(skb, dev))
+               goto fail_rcu;
+
        /* remove the injection radiotap header */
        skb_pull(skb, len_rthdr);
 
index 0a0481f..060059e 100644 (file)
@@ -947,7 +947,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
 
        switch (elem->data[0]) {
        case WLAN_EID_EXT_HE_MU_EDCA:
-               if (len == sizeof(*elems->mu_edca_param_set)) {
+               if (len >= sizeof(*elems->mu_edca_param_set)) {
                        elems->mu_edca_param_set = data;
                        if (crc)
                                *crc = crc32_be(*crc, (void *)elem,
@@ -968,7 +968,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
                }
                break;
        case WLAN_EID_EXT_UORA:
-               if (len == 1)
+               if (len >= 1)
                        elems->uora_element = data;
                break;
        case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
@@ -976,7 +976,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
                        elems->max_channel_switch_time = data;
                break;
        case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
-               if (len == sizeof(*elems->mbssid_config_ie))
+               if (len >= sizeof(*elems->mbssid_config_ie))
                        elems->mbssid_config_ie = data;
                break;
        case WLAN_EID_EXT_HE_SPR:
@@ -985,7 +985,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
                        elems->he_spr = data;
                break;
        case WLAN_EID_EXT_HE_6GHZ_CAPA:
-               if (len == sizeof(*elems->he_6ghz_capa))
+               if (len >= sizeof(*elems->he_6ghz_capa))
                        elems->he_6ghz_capa = data;
                break;
        }
@@ -1074,14 +1074,14 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
 
                switch (id) {
                case WLAN_EID_LINK_ID:
-                       if (elen + 2 != sizeof(struct ieee80211_tdls_lnkie)) {
+                       if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
                                elem_parse_failed = true;
                                break;
                        }
                        elems->lnk_id = (void *)(pos - 2);
                        break;
                case WLAN_EID_CHAN_SWITCH_TIMING:
-                       if (elen != sizeof(struct ieee80211_ch_switch_timing)) {
+                       if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
                                elem_parse_failed = true;
                                break;
                        }
@@ -1244,7 +1244,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                        elems->sec_chan_offs = (void *)pos;
                        break;
                case WLAN_EID_CHAN_SWITCH_PARAM:
-                       if (elen !=
+                       if (elen <
                            sizeof(*elems->mesh_chansw_params_ie)) {
                                elem_parse_failed = true;
                                break;
@@ -1253,7 +1253,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                        break;
                case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
                        if (!action ||
-                           elen != sizeof(*elems->wide_bw_chansw_ie)) {
+                           elen < sizeof(*elems->wide_bw_chansw_ie)) {
                                elem_parse_failed = true;
                                break;
                        }
@@ -1272,7 +1272,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                        ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
                                              pos, elen);
                        if (ie) {
-                               if (ie[1] == sizeof(*elems->wide_bw_chansw_ie))
+                               if (ie[1] >= sizeof(*elems->wide_bw_chansw_ie))
                                        elems->wide_bw_chansw_ie =
                                                (void *)(ie + 2);
                                else
@@ -1316,7 +1316,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                        elems->cisco_dtpc_elem = pos;
                        break;
                case WLAN_EID_ADDBA_EXT:
-                       if (elen != sizeof(struct ieee80211_addba_ext_ie)) {
+                       if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
                                elem_parse_failed = true;
                                break;
                        }
@@ -1342,7 +1342,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                                                          elem, elems);
                        break;
                case WLAN_EID_S1G_CAPABILITIES:
-                       if (elen == sizeof(*elems->s1g_capab))
+                       if (elen >= sizeof(*elems->s1g_capab))
                                elems->s1g_capab = (void *)pos;
                        else
                                elem_parse_failed = true;
@@ -2178,8 +2178,6 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
        list_for_each_entry(ctx, &local->chanctx_list, list)
                ctx->driver_present = false;
        mutex_unlock(&local->chanctx_mtx);
-
-       cfg80211_shutdown_all_interfaces(local->hw.wiphy);
 }
 
 static void ieee80211_assign_chanctx(struct ieee80211_local *local,
index 6b825fb..9b263f2 100644 (file)
@@ -356,6 +356,8 @@ void mptcp_get_options(const struct sk_buff *skb,
                        length--;
                        continue;
                default:
+                       if (length < 2)
+                               return;
                        opsize = *ptr++;
                        if (opsize < 2) /* "silly options" */
                                return;
index 2bc1995..6323500 100644 (file)
@@ -280,11 +280,13 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
 
        /* try to fetch required memory from subflow */
        if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
-               if (ssk->sk_forward_alloc < skb->truesize)
-                       goto drop;
-               __sk_mem_reclaim(ssk, skb->truesize);
-               if (!sk_rmem_schedule(sk, skb, skb->truesize))
+               int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;
+
+               if (ssk->sk_forward_alloc < amount)
                        goto drop;
+
+               ssk->sk_forward_alloc -= amount;
+               sk->sk_forward_alloc += amount;
        }
 
        /* the skb map_seq accounts for the skb offset:
@@ -668,18 +670,22 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
 /* In most cases we will be able to lock the mptcp socket.  If its already
  * owned, we need to defer to the work queue to avoid ABBA deadlock.
  */
-static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
 {
        struct sock *sk = (struct sock *)msk;
        unsigned int moved = 0;
 
        if (inet_sk_state_load(sk) == TCP_CLOSE)
-               return;
-
-       mptcp_data_lock(sk);
+               return false;
 
        __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
        __mptcp_ofo_queue(msk);
+       if (unlikely(ssk->sk_err)) {
+               if (!sock_owned_by_user(sk))
+                       __mptcp_error_report(sk);
+               else
+                       set_bit(MPTCP_ERROR_REPORT,  &msk->flags);
+       }
 
        /* If the moves have caught up with the DATA_FIN sequence number
         * it's time to ack the DATA_FIN and change socket state, but
@@ -688,7 +694,7 @@ static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
         */
        if (mptcp_pending_data_fin(sk, NULL))
                mptcp_schedule_work(sk);
-       mptcp_data_unlock(sk);
+       return moved > 0;
 }
 
 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
@@ -696,7 +702,6 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct mptcp_sock *msk = mptcp_sk(sk);
        int sk_rbuf, ssk_rbuf;
-       bool wake;
 
        /* The peer can send data while we are shutting down this
         * subflow at msk destruction time, but we must avoid enqueuing
@@ -705,28 +710,22 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
        if (unlikely(subflow->disposable))
                return;
 
-       /* move_skbs_to_msk below can legitly clear the data_avail flag,
-        * but we will need later to properly woke the reader, cache its
-        * value
-        */
-       wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
-       if (wake)
-               set_bit(MPTCP_DATA_READY, &msk->flags);
-
        ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
        sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
        if (unlikely(ssk_rbuf > sk_rbuf))
                sk_rbuf = ssk_rbuf;
 
-       /* over limit? can't append more skbs to msk */
+       /* over limit? can't append more skbs to msk; also, no need to wake-up */
        if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
-               goto wake;
-
-       move_skbs_to_msk(msk, ssk);
+               return;
 
-wake:
-       if (wake)
+       /* Wake-up the reader only for in-sequence data */
+       mptcp_data_lock(sk);
+       if (move_skbs_to_msk(msk, ssk)) {
+               set_bit(MPTCP_DATA_READY, &msk->flags);
                sk->sk_data_ready(sk);
+       }
+       mptcp_data_unlock(sk);
 }
 
 static bool mptcp_do_flush_join_list(struct mptcp_sock *msk)
@@ -858,7 +857,7 @@ static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
        sock_owned_by_me(sk);
 
        mptcp_for_each_subflow(msk, subflow) {
-               if (subflow->data_avail)
+               if (READ_ONCE(subflow->data_avail))
                        return mptcp_subflow_tcp_sock(subflow);
        }
 
@@ -947,6 +946,10 @@ static void __mptcp_update_wmem(struct sock *sk)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
 
+#ifdef CONFIG_LOCKDEP
+       WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
+
        if (!msk->wmem_reserved)
                return;
 
@@ -1085,10 +1088,20 @@ out:
 
 static void __mptcp_clean_una_wakeup(struct sock *sk)
 {
+#ifdef CONFIG_LOCKDEP
+       WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
        __mptcp_clean_una(sk);
        mptcp_write_space(sk);
 }
 
+static void mptcp_clean_una_wakeup(struct sock *sk)
+{
+       mptcp_data_lock(sk);
+       __mptcp_clean_una_wakeup(sk);
+       mptcp_data_unlock(sk);
+}
+
 static void mptcp_enter_memory_pressure(struct sock *sk)
 {
        struct mptcp_subflow_context *subflow;
@@ -1941,6 +1954,9 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
                done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
                mptcp_data_unlock(sk);
                tcp_cleanup_rbuf(ssk, moved);
+
+               if (unlikely(ssk->sk_err))
+                       __mptcp_error_report(sk);
                unlock_sock_fast(ssk, slowpath);
        } while (!done);
 
@@ -2299,7 +2315,7 @@ static void __mptcp_retrans(struct sock *sk)
        struct sock *ssk;
        int ret;
 
-       __mptcp_clean_una_wakeup(sk);
+       mptcp_clean_una_wakeup(sk);
        dfrag = mptcp_rtx_head(sk);
        if (!dfrag) {
                if (mptcp_data_fin_enabled(msk)) {
index 0c6f99c..385796f 100644 (file)
@@ -362,7 +362,6 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
 enum mptcp_data_avail {
        MPTCP_SUBFLOW_NODATA,
        MPTCP_SUBFLOW_DATA_AVAIL,
-       MPTCP_SUBFLOW_OOO_DATA
 };
 
 struct mptcp_delegated_action {
index bde6be7..be1de40 100644 (file)
@@ -630,21 +630,20 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 
        /* if the sk is MP_CAPABLE, we try to fetch the client key */
        if (subflow_req->mp_capable) {
-               if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
-                       /* here we can receive and accept an in-window,
-                        * out-of-order pkt, which will not carry the MP_CAPABLE
-                        * opt even on mptcp enabled paths
-                        */
-                       goto create_msk;
-               }
-
+               /* we can receive and accept an in-window, out-of-order pkt,
+                * which may not carry the MP_CAPABLE opt even on mptcp enabled
+                * paths: always try to extract the peer key, and fallback
+                * for packets missing it.
+                * Even OoO DSS packets coming legitimately after dropped or
+                * reordered MPC will cause fallback, but we don't have other
+                * options.
+                */
                mptcp_get_options(skb, &mp_opt);
                if (!mp_opt.mp_capable) {
                        fallback = true;
                        goto create_child;
                }
 
-create_msk:
                new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
                if (!new_msk)
                        fallback = true;
@@ -785,10 +784,10 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
        return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
 }
 
-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
 {
-       WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
-                 ssn, subflow->map_subflow_seq, subflow->map_data_len);
+       pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+                ssn, subflow->map_subflow_seq, subflow->map_data_len);
 }
 
 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
@@ -813,13 +812,13 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
                /* Mapping covers data later in the subflow stream,
                 * currently unsupported.
                 */
-               warn_bad_map(subflow, ssn);
+               dbg_bad_map(subflow, ssn);
                return false;
        }
        if (unlikely(!before(ssn, subflow->map_subflow_seq +
                                  subflow->map_data_len))) {
                /* Mapping does covers past subflow data, invalid */
-               warn_bad_map(subflow, ssn + skb->len);
+               dbg_bad_map(subflow, ssn);
                return false;
        }
        return true;
@@ -1001,7 +1000,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
        struct sk_buff *skb;
 
        if (!skb_peek(&ssk->sk_receive_queue))
-               subflow->data_avail = 0;
+               WRITE_ONCE(subflow->data_avail, 0);
        if (subflow->data_avail)
                return true;
 
@@ -1012,21 +1011,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
 
                status = get_mapping_status(ssk, msk);
                trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
-               if (status == MAPPING_INVALID) {
-                       ssk->sk_err = EBADMSG;
-                       goto fatal;
-               }
-               if (status == MAPPING_DUMMY) {
-                       __mptcp_do_fallback(msk);
-                       skb = skb_peek(&ssk->sk_receive_queue);
-                       subflow->map_valid = 1;
-                       subflow->map_seq = READ_ONCE(msk->ack_seq);
-                       subflow->map_data_len = skb->len;
-                       subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
-                                                  subflow->ssn_offset;
-                       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
-                       return true;
-               }
+               if (unlikely(status == MAPPING_INVALID))
+                       goto fallback;
+
+               if (unlikely(status == MAPPING_DUMMY))
+                       goto fallback;
 
                if (status != MAPPING_OK)
                        goto no_data;
@@ -1039,10 +1028,8 @@ static bool subflow_check_data_avail(struct sock *ssk)
                 * MP_CAPABLE-based mapping
                 */
                if (unlikely(!READ_ONCE(msk->can_ack))) {
-                       if (!subflow->mpc_map) {
-                               ssk->sk_err = EBADMSG;
-                               goto fatal;
-                       }
+                       if (!subflow->mpc_map)
+                               goto fallback;
                        WRITE_ONCE(msk->remote_key, subflow->remote_key);
                        WRITE_ONCE(msk->ack_seq, subflow->map_seq);
                        WRITE_ONCE(msk->can_ack, true);
@@ -1052,35 +1039,43 @@ static bool subflow_check_data_avail(struct sock *ssk)
                ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
                pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
                         ack_seq);
-               if (ack_seq == old_ack) {
-                       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
-                       break;
-               } else if (after64(ack_seq, old_ack)) {
-                       subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
-                       break;
+               if (unlikely(before64(ack_seq, old_ack))) {
+                       mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+                       continue;
                }
 
-               /* only accept in-sequence mapping. Old values are spurious
-                * retransmission
-                */
-               mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+               WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+               break;
        }
        return true;
 
 no_data:
        subflow_sched_work_if_closed(msk, ssk);
        return false;
-fatal:
-       /* fatal protocol error, close the socket */
-       /* This barrier is coupled with smp_rmb() in tcp_poll() */
-       smp_wmb();
-       ssk->sk_error_report(ssk);
-       tcp_set_state(ssk, TCP_CLOSE);
-       subflow->reset_transient = 0;
-       subflow->reset_reason = MPTCP_RST_EMPTCP;
-       tcp_send_active_reset(ssk, GFP_ATOMIC);
-       subflow->data_avail = 0;
-       return false;
+
+fallback:
+       /* RFC 8684 section 3.7. */
+       if (subflow->mp_join || subflow->fully_established) {
+               /* fatal protocol error, close the socket.
+                * subflow_error_report() will introduce the appropriate barriers
+                */
+               ssk->sk_err = EBADMSG;
+               tcp_set_state(ssk, TCP_CLOSE);
+               subflow->reset_transient = 0;
+               subflow->reset_reason = MPTCP_RST_EMPTCP;
+               tcp_send_active_reset(ssk, GFP_ATOMIC);
+               WRITE_ONCE(subflow->data_avail, 0);
+               return false;
+       }
+
+       __mptcp_do_fallback(msk);
+       skb = skb_peek(&ssk->sk_receive_queue);
+       subflow->map_valid = 1;
+       subflow->map_seq = READ_ONCE(msk->ack_seq);
+       subflow->map_data_len = skb->len;
+       subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+       WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+       return true;
 }
 
 bool mptcp_subflow_data_available(struct sock *sk)
@@ -1091,7 +1086,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
        if (subflow->map_valid &&
            mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
                subflow->map_valid = 0;
-               subflow->data_avail = 0;
+               WRITE_ONCE(subflow->data_avail, 0);
 
                pr_debug("Done with mapping: seq=%u data_len=%u",
                         subflow->map_subflow_seq,
@@ -1119,41 +1114,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
        *full_space = tcp_full_space(sk);
 }
 
-static void subflow_data_ready(struct sock *sk)
-{
-       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-       u16 state = 1 << inet_sk_state_load(sk);
-       struct sock *parent = subflow->conn;
-       struct mptcp_sock *msk;
-
-       msk = mptcp_sk(parent);
-       if (state & TCPF_LISTEN) {
-               /* MPJ subflow are removed from accept queue before reaching here,
-                * avoid stray wakeups
-                */
-               if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
-                       return;
-
-               set_bit(MPTCP_DATA_READY, &msk->flags);
-               parent->sk_data_ready(parent);
-               return;
-       }
-
-       WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
-                    !subflow->mp_join && !(state & TCPF_CLOSE));
-
-       if (mptcp_subflow_data_available(sk))
-               mptcp_data_ready(parent, sk);
-}
-
-static void subflow_write_space(struct sock *ssk)
-{
-       struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
-
-       mptcp_propagate_sndbuf(sk, ssk);
-       mptcp_write_space(sk);
-}
-
 void __mptcp_error_report(struct sock *sk)
 {
        struct mptcp_subflow_context *subflow;
@@ -1194,6 +1154,43 @@ static void subflow_error_report(struct sock *ssk)
        mptcp_data_unlock(sk);
 }
 
+static void subflow_data_ready(struct sock *sk)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       u16 state = 1 << inet_sk_state_load(sk);
+       struct sock *parent = subflow->conn;
+       struct mptcp_sock *msk;
+
+       msk = mptcp_sk(parent);
+       if (state & TCPF_LISTEN) {
+               /* MPJ subflow are removed from accept queue before reaching here,
+                * avoid stray wakeups
+                */
+               if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
+                       return;
+
+               set_bit(MPTCP_DATA_READY, &msk->flags);
+               parent->sk_data_ready(parent);
+               return;
+       }
+
+       WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
+                    !subflow->mp_join && !(state & TCPF_CLOSE));
+
+       if (mptcp_subflow_data_available(sk))
+               mptcp_data_ready(parent, sk);
+       else if (unlikely(sk->sk_err))
+               subflow_error_report(sk);
+}
+
+static void subflow_write_space(struct sock *ssk)
+{
+       struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+
+       mptcp_propagate_sndbuf(sk, ssk);
+       mptcp_write_space(sk);
+}
+
 static struct inet_connection_sock_af_ops *
 subflow_default_af_ops(struct sock *sk)
 {
@@ -1504,6 +1501,8 @@ static void subflow_state_change(struct sock *sk)
         */
        if (mptcp_subflow_data_available(sk))
                mptcp_data_ready(parent, sk);
+       else if (unlikely(sk->sk_err))
+               subflow_error_report(sk);
 
        subflow_sched_work_if_closed(mptcp_sk(parent), sk);
 
index d45dbcb..c250970 100644 (file)
@@ -1367,7 +1367,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
        ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
        svc->port = u->port;
        svc->fwmark = u->fwmark;
-       svc->flags = u->flags;
+       svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
        svc->timeout = u->timeout * HZ;
        svc->netmask = u->netmask;
        svc->ipvs = ipvs;
index 89e5bac..dc9ca12 100644 (file)
@@ -664,7 +664,7 @@ int nf_conntrack_proto_init(void)
 
 #if IS_ENABLED(CONFIG_IPV6)
 cleanup_sockopt:
-       nf_unregister_sockopt(&so_getorigdst6);
+       nf_unregister_sockopt(&so_getorigdst);
 #endif
        return ret;
 }
index b100c04..3d6d494 100644 (file)
@@ -31,6 +31,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
        int length = (th->doff * 4) - sizeof(*th);
        u8 buf[40], *ptr;
 
+       if (unlikely(length < 0))
+               return false;
+
        ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
        if (ptr == NULL)
                return false;
@@ -47,6 +50,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
                        length--;
                        continue;
                default:
+                       if (length < 2)
+                               return true;
                        opsize = *ptr++;
                        if (opsize < 2)
                                return true;
index d63d2d8..bf4d6ec 100644 (file)
@@ -736,7 +736,8 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
                goto nla_put_failure;
 
        if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
-           nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
+           nla_put_be32(skb, NFTA_TABLE_FLAGS,
+                        htonl(table->flags & NFT_TABLE_F_MASK)) ||
            nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
            nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
                         NFTA_TABLE_PAD))
@@ -947,20 +948,22 @@ err_register_hooks:
 
 static void nf_tables_table_disable(struct net *net, struct nft_table *table)
 {
+       table->flags &= ~NFT_TABLE_F_DORMANT;
        nft_table_disable(net, table, 0);
+       table->flags |= NFT_TABLE_F_DORMANT;
 }
 
-enum {
-       NFT_TABLE_STATE_UNCHANGED       = 0,
-       NFT_TABLE_STATE_DORMANT,
-       NFT_TABLE_STATE_WAKEUP
-};
+#define __NFT_TABLE_F_INTERNAL         (NFT_TABLE_F_MASK + 1)
+#define __NFT_TABLE_F_WAS_DORMANT      (__NFT_TABLE_F_INTERNAL << 0)
+#define __NFT_TABLE_F_WAS_AWAKEN       (__NFT_TABLE_F_INTERNAL << 1)
+#define __NFT_TABLE_F_UPDATE           (__NFT_TABLE_F_WAS_DORMANT | \
+                                        __NFT_TABLE_F_WAS_AWAKEN)
 
 static int nf_tables_updtable(struct nft_ctx *ctx)
 {
        struct nft_trans *trans;
        u32 flags;
-       int ret = 0;
+       int ret;
 
        if (!ctx->nla[NFTA_TABLE_FLAGS])
                return 0;
@@ -985,21 +988,27 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
 
        if ((flags & NFT_TABLE_F_DORMANT) &&
            !(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
-               nft_trans_table_state(trans) = NFT_TABLE_STATE_DORMANT;
+               ctx->table->flags |= NFT_TABLE_F_DORMANT;
+               if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE))
+                       ctx->table->flags |= __NFT_TABLE_F_WAS_AWAKEN;
        } else if (!(flags & NFT_TABLE_F_DORMANT) &&
                   ctx->table->flags & NFT_TABLE_F_DORMANT) {
-               ret = nf_tables_table_enable(ctx->net, ctx->table);
-               if (ret >= 0)
-                       nft_trans_table_state(trans) = NFT_TABLE_STATE_WAKEUP;
+               ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+               if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE)) {
+                       ret = nf_tables_table_enable(ctx->net, ctx->table);
+                       if (ret < 0)
+                               goto err_register_hooks;
+
+                       ctx->table->flags |= __NFT_TABLE_F_WAS_DORMANT;
+               }
        }
-       if (ret < 0)
-               goto err;
 
-       nft_trans_table_flags(trans) = flags;
        nft_trans_table_update(trans) = true;
        nft_trans_commit_list_add_tail(ctx->net, trans);
+
        return 0;
-err:
+
+err_register_hooks:
        nft_trans_destroy(trans);
        return ret;
 }
@@ -1905,7 +1914,7 @@ static int nft_chain_parse_netdev(struct net *net,
 static int nft_chain_parse_hook(struct net *net,
                                const struct nlattr * const nla[],
                                struct nft_chain_hook *hook, u8 family,
-                               bool autoload)
+                               struct netlink_ext_ack *extack, bool autoload)
 {
        struct nftables_pernet *nft_net = nft_pernet(net);
        struct nlattr *ha[NFTA_HOOK_MAX + 1];
@@ -1935,8 +1944,10 @@ static int nft_chain_parse_hook(struct net *net,
        if (nla[NFTA_CHAIN_TYPE]) {
                type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
                                                   family, autoload);
-               if (IS_ERR(type))
+               if (IS_ERR(type)) {
+                       NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
                        return PTR_ERR(type);
+               }
        }
        if (hook->num >= NFT_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
                return -EOPNOTSUPP;
@@ -1945,8 +1956,11 @@ static int nft_chain_parse_hook(struct net *net,
            hook->priority <= NF_IP_PRI_CONNTRACK)
                return -EOPNOTSUPP;
 
-       if (!try_module_get(type->owner))
+       if (!try_module_get(type->owner)) {
+               if (nla[NFTA_CHAIN_TYPE])
+                       NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
                return -ENOENT;
+       }
 
        hook->type = type;
 
@@ -2057,7 +2071,8 @@ static int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
 static u64 chain_id;
 
 static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
-                             u8 policy, u32 flags)
+                             u8 policy, u32 flags,
+                             struct netlink_ext_ack *extack)
 {
        const struct nlattr * const *nla = ctx->nla;
        struct nft_table *table = ctx->table;
@@ -2079,7 +2094,8 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                if (flags & NFT_CHAIN_BINDING)
                        return -EOPNOTSUPP;
 
-               err = nft_chain_parse_hook(net, nla, &hook, family, true);
+               err = nft_chain_parse_hook(net, nla, &hook, family, extack,
+                                          true);
                if (err < 0)
                        return err;
 
@@ -2234,7 +2250,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
                        return -EEXIST;
                }
                err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
-                                          false);
+                                          extack, false);
                if (err < 0)
                        return err;
 
@@ -2447,7 +2463,7 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
                                          extack);
        }
 
-       return nf_tables_addchain(&ctx, family, genmask, policy, flags);
+       return nf_tables_addchain(&ctx, family, genmask, policy, flags, extack);
 }
 
 static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
@@ -3328,8 +3344,10 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
                        if (n == NFT_RULE_MAXEXPRS)
                                goto err1;
                        err = nf_tables_expr_parse(&ctx, tmp, &expr_info[n]);
-                       if (err < 0)
+                       if (err < 0) {
+                               NL_SET_BAD_ATTR(extack, tmp);
                                goto err1;
+                       }
                        size += expr_info[n].ops->size;
                        n++;
                }
@@ -4346,13 +4364,45 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
        err = nf_tables_set_alloc_name(&ctx, set, name);
        kfree(name);
        if (err < 0)
-               goto err_set_alloc_name;
+               goto err_set_name;
+
+       udata = NULL;
+       if (udlen) {
+               udata = set->data + size;
+               nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
+       }
+
+       INIT_LIST_HEAD(&set->bindings);
+       INIT_LIST_HEAD(&set->catchall_list);
+       set->table = table;
+       write_pnet(&set->net, net);
+       set->ops = ops;
+       set->ktype = ktype;
+       set->klen = desc.klen;
+       set->dtype = dtype;
+       set->objtype = objtype;
+       set->dlen = desc.dlen;
+       set->flags = flags;
+       set->size = desc.size;
+       set->policy = policy;
+       set->udlen = udlen;
+       set->udata = udata;
+       set->timeout = timeout;
+       set->gc_int = gc_int;
+
+       set->field_count = desc.field_count;
+       for (i = 0; i < desc.field_count; i++)
+               set->field_len[i] = desc.field_len[i];
+
+       err = ops->init(set, &desc, nla);
+       if (err < 0)
+               goto err_set_init;
 
        if (nla[NFTA_SET_EXPR]) {
                expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
                if (IS_ERR(expr)) {
                        err = PTR_ERR(expr);
-                       goto err_set_alloc_name;
+                       goto err_set_expr_alloc;
                }
                set->exprs[0] = expr;
                set->num_exprs++;
@@ -4363,75 +4413,44 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
 
                if (!(flags & NFT_SET_EXPR)) {
                        err = -EINVAL;
-                       goto err_set_alloc_name;
+                       goto err_set_expr_alloc;
                }
                i = 0;
                nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
                        if (i == NFT_SET_EXPR_MAX) {
                                err = -E2BIG;
-                               goto err_set_init;
+                               goto err_set_expr_alloc;
                        }
                        if (nla_type(tmp) != NFTA_LIST_ELEM) {
                                err = -EINVAL;
-                               goto err_set_init;
+                               goto err_set_expr_alloc;
                        }
                        expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
                        if (IS_ERR(expr)) {
                                err = PTR_ERR(expr);
-                               goto err_set_init;
+                               goto err_set_expr_alloc;
                        }
                        set->exprs[i++] = expr;
                        set->num_exprs++;
                }
        }
 
-       udata = NULL;
-       if (udlen) {
-               udata = set->data + size;
-               nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
-       }
-
-       INIT_LIST_HEAD(&set->bindings);
-       INIT_LIST_HEAD(&set->catchall_list);
-       set->table = table;
-       write_pnet(&set->net, net);
-       set->ops   = ops;
-       set->ktype = ktype;
-       set->klen  = desc.klen;
-       set->dtype = dtype;
-       set->objtype = objtype;
-       set->dlen  = desc.dlen;
-       set->flags = flags;
-       set->size  = desc.size;
-       set->policy = policy;
-       set->udlen  = udlen;
-       set->udata  = udata;
-       set->timeout = timeout;
-       set->gc_int = gc_int;
        set->handle = nf_tables_alloc_handle(table);
 
-       set->field_count = desc.field_count;
-       for (i = 0; i < desc.field_count; i++)
-               set->field_len[i] = desc.field_len[i];
-
-       err = ops->init(set, &desc, nla);
-       if (err < 0)
-               goto err_set_init;
-
        err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
        if (err < 0)
-               goto err_set_trans;
+               goto err_set_expr_alloc;
 
        list_add_tail_rcu(&set->list, &table->sets);
        table->use++;
        return 0;
 
-err_set_trans:
-       ops->destroy(set);
-err_set_init:
+err_set_expr_alloc:
        for (i = 0; i < set->num_exprs; i++)
                nft_expr_destroy(&ctx, set->exprs[i]);
-err_set_alloc_name:
+
+       ops->destroy(set);
+err_set_init:
        kfree(set->name);
 err_set_name:
        kvfree(set);
@@ -8547,10 +8566,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                switch (trans->msg_type) {
                case NFT_MSG_NEWTABLE:
                        if (nft_trans_table_update(trans)) {
-                               if (nft_trans_table_state(trans) == NFT_TABLE_STATE_DORMANT)
+                               if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+                                       nft_trans_destroy(trans);
+                                       break;
+                               }
+                               if (trans->ctx.table->flags & NFT_TABLE_F_DORMANT)
                                        nf_tables_table_disable(net, trans->ctx.table);
 
-                               trans->ctx.table->flags = nft_trans_table_flags(trans);
+                               trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
                        } else {
                                nft_clear(net, trans->ctx.table);
                        }
@@ -8768,9 +8791,17 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                switch (trans->msg_type) {
                case NFT_MSG_NEWTABLE:
                        if (nft_trans_table_update(trans)) {
-                               if (nft_trans_table_state(trans) == NFT_TABLE_STATE_WAKEUP)
+                               if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+                                       nft_trans_destroy(trans);
+                                       break;
+                               }
+                               if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_DORMANT) {
                                        nf_tables_table_disable(net, trans->ctx.table);
-
+                                       trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+                               } else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
+                                       trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
+                               }
+                               trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
                                nft_trans_destroy(trans);
                        } else {
                                list_del_rcu(&trans->ctx.table->list);
index 322ac5d..752b10c 100644 (file)
@@ -380,10 +380,14 @@ static int
 nfnl_cthelper_update(const struct nlattr * const tb[],
                     struct nf_conntrack_helper *helper)
 {
+       u32 size;
        int ret;
 
-       if (tb[NFCTH_PRIV_DATA_LEN])
-               return -EBUSY;
+       if (tb[NFCTH_PRIV_DATA_LEN]) {
+               size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+               if (size != helper->data_len)
+                       return -EBUSY;
+       }
 
        if (tb[NFCTH_POLICY]) {
                ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
index 0592a94..337e22d 100644 (file)
@@ -1217,7 +1217,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
        struct nf_conn *ct;
 
        ct = nf_ct_get(pkt->skb, &ctinfo);
-       if (!ct || ctinfo == IP_CT_UNTRACKED) {
+       if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
                regs->verdict.code = NFT_BREAK;
                return;
        }
index 53dbe73..6cfd30f 100644 (file)
@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
        if (!llcp_sock->service_name) {
                nfc_llcp_local_put(llcp_sock->local);
                llcp_sock->local = NULL;
+               llcp_sock->dev = NULL;
                ret = -ENOMEM;
                goto put_dev;
        }
@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                llcp_sock->local = NULL;
                kfree(llcp_sock->service_name);
                llcp_sock->service_name = NULL;
+               llcp_sock->dev = NULL;
                ret = -EADDRINUSE;
                goto put_dev;
        }
index ae906eb..330ba68 100644 (file)
@@ -2683,7 +2683,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        }
        if (likely(saddr == NULL)) {
                dev     = packet_cached_dev_get(po);
-               proto   = po->num;
+               proto   = READ_ONCE(po->num);
        } else {
                err = -EINVAL;
                if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2896,7 +2896,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 
        if (likely(saddr == NULL)) {
                dev     = packet_cached_dev_get(po);
-               proto   = po->num;
+               proto   = READ_ONCE(po->num);
        } else {
                err = -EINVAL;
                if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -3034,10 +3034,13 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
 
-       if (po->tx_ring.pg_vec)
+       /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
+        * tpacket_snd() will redo the check safely.
+        */
+       if (data_race(po->tx_ring.pg_vec))
                return tpacket_snd(po, msg);
-       else
-               return packet_snd(sock, msg, len);
+
+       return packet_snd(sock, msg, len);
 }
 
 /*
@@ -3168,7 +3171,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
                        /* prevents packet_notifier() from calling
                         * register_prot_hook()
                         */
-                       po->num = 0;
+                       WRITE_ONCE(po->num, 0);
                        __unregister_prot_hook(sk, true);
                        rcu_read_lock();
                        dev_curr = po->prot_hook.dev;
@@ -3178,17 +3181,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
                }
 
                BUG_ON(po->running);
-               po->num = proto;
+               WRITE_ONCE(po->num, proto);
                po->prot_hook.type = proto;
 
                if (unlikely(unlisted)) {
                        dev_put(dev);
                        po->prot_hook.dev = NULL;
-                       po->ifindex = -1;
+                       WRITE_ONCE(po->ifindex, -1);
                        packet_cached_dev_reset(po);
                } else {
                        po->prot_hook.dev = dev;
-                       po->ifindex = dev ? dev->ifindex : 0;
+                       WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
                        packet_cached_dev_assign(po, dev);
                }
        }
@@ -3502,7 +3505,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
        uaddr->sa_family = AF_PACKET;
        memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
        rcu_read_lock();
-       dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
+       dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
        if (dev)
                strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
        rcu_read_unlock();
@@ -3517,16 +3520,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
+       int ifindex;
 
        if (peer)
                return -EOPNOTSUPP;
 
+       ifindex = READ_ONCE(po->ifindex);
        sll->sll_family = AF_PACKET;
-       sll->sll_ifindex = po->ifindex;
-       sll->sll_protocol = po->num;
+       sll->sll_ifindex = ifindex;
+       sll->sll_protocol = READ_ONCE(po->num);
        sll->sll_pkttype = 0;
        rcu_read_lock();
-       dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
+       dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
        if (dev) {
                sll->sll_hatype = dev->type;
                sll->sll_halen = dev->addr_len;
@@ -4105,7 +4110,7 @@ static int packet_notifier(struct notifier_block *this,
                                }
                                if (msg == NETDEV_UNREGISTER) {
                                        packet_cached_dev_reset(po);
-                                       po->ifindex = -1;
+                                       WRITE_ONCE(po->ifindex, -1);
                                        if (po->prot_hook.dev)
                                                dev_put(po->prot_hook.dev);
                                        po->prot_hook.dev = NULL;
@@ -4411,7 +4416,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        was_running = po->running;
        num = po->num;
        if (was_running) {
-               po->num = 0;
+               WRITE_ONCE(po->num, 0);
                __unregister_prot_hook(sk, false);
        }
        spin_unlock(&po->bind_lock);
@@ -4446,7 +4451,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 
        spin_lock(&po->bind_lock);
        if (was_running) {
-               po->num = num;
+               WRITE_ONCE(po->num, num);
                register_prot_hook(sk);
        }
        spin_unlock(&po->bind_lock);
@@ -4616,8 +4621,8 @@ static int packet_seq_show(struct seq_file *seq, void *v)
                           s,
                           refcount_read(&s->sk_refcnt),
                           s->sk_type,
-                          ntohs(po->num),
-                          po->ifindex,
+                          ntohs(READ_ONCE(po->num)),
+                          READ_ONCE(po->ifindex),
                           po->running,
                           atomic_read(&s->sk_rmem_alloc),
                           from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
index c0477be..f2efaa4 100644 (file)
@@ -436,7 +436,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
        struct qrtr_sock *ipc;
        struct sk_buff *skb;
        struct qrtr_cb *cb;
-       unsigned int size;
+       size_t size;
        unsigned int ver;
        size_t hdrlen;
 
index 4db109f..5b426dc 100644 (file)
@@ -714,7 +714,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 
                if (rds_cmsg_recv(inc, msg, rs)) {
                        ret = -EFAULT;
-                       goto out;
+                       break;
                }
                rds_recvmsg_zcookie(rs, msg);
 
index ec7a1c4..a656baa 100644 (file)
@@ -904,14 +904,19 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
        }
 
        err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
-       if (err == NF_ACCEPT &&
-           ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
-               if (maniptype == NF_NAT_MANIP_SRC)
-                       maniptype = NF_NAT_MANIP_DST;
-               else
-                       maniptype = NF_NAT_MANIP_SRC;
-
-               err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+       if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
+               if (ct->status & IPS_SRC_NAT) {
+                       if (maniptype == NF_NAT_MANIP_SRC)
+                               maniptype = NF_NAT_MANIP_DST;
+                       else
+                               maniptype = NF_NAT_MANIP_SRC;
+
+                       err = ct_nat_execute(skb, ct, ctinfo, range,
+                                            maniptype);
+               } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+                       err = ct_nat_execute(skb, ct, ctinfo, NULL,
+                                            NF_NAT_MANIP_SRC);
+               }
        }
        return err;
 #else
@@ -984,7 +989,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
         */
        cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
        if (!cached) {
-               if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
+               if (tcf_ct_flow_table_lookup(p, skb, family)) {
                        skip_add = true;
                        goto do_nat;
                }
@@ -1022,10 +1027,11 @@ do_nat:
                 * even if the connection is already confirmed.
                 */
                nf_conntrack_confirm(skb);
-       } else if (!skip_add) {
-               tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
        }
 
+       if (!skip_add)
+               tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
+
 out_push:
        skb_push_rcsum(skb, nh_ofs);
 
@@ -1202,9 +1208,6 @@ static int tcf_ct_fill_params(struct net *net,
                                   sizeof(p->zone));
        }
 
-       if (p->zone == NF_CT_DEFAULT_ZONE_ID)
-               return 0;
-
        nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
        tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
        if (!tmpl) {
index 7d37638..9515428 100644 (file)
@@ -943,7 +943,7 @@ static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
        }
 
        tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
-       if (!tcph)
+       if (!tcph || tcph->doff < 5)
                return NULL;
 
        return skb_header_pointer(skb, offset,
@@ -967,6 +967,8 @@ static const void *cake_get_tcpopt(const struct tcphdr *tcph,
                        length--;
                        continue;
                }
+               if (length < 2)
+                       break;
                opsize = *ptr++;
                if (opsize < 2 || opsize > length)
                        break;
@@ -1104,6 +1106,8 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
                        length--;
                        continue;
                }
+               if (length < 2)
+                       break;
                opsize = *ptr++;
                if (opsize < 2 || opsize > length)
                        break;
@@ -2338,7 +2342,7 @@ static int cake_config_precedence(struct Qdisc *sch)
 
 /*     List of known Diffserv codepoints:
  *
- *     Least Effort (CS1)
+ *     Least Effort (CS1, LE)
  *     Best Effort (CS0)
  *     Max Reliability & LLT "Lo" (TOS1)
  *     Max Throughput (TOS2)
@@ -2360,7 +2364,7 @@ static int cake_config_precedence(struct Qdisc *sch)
  *     Total 25 codepoints.
  */
 
-/*     List of traffic classes in RFC 4594:
+/*     List of traffic classes in RFC 4594, updated by RFC 8622:
  *             (roughly descending order of contended priority)
  *             (roughly ascending order of uncontended throughput)
  *
@@ -2375,7 +2379,7 @@ static int cake_config_precedence(struct Qdisc *sch)
  *     Ops, Admin, Management (CS2,TOS1) - eg. ssh
  *     Standard Service (CS0 & unrecognised codepoints)
  *     High Throughput Data (AF1x,TOS2)  - eg. web traffic
- *     Low Priority Data (CS1)           - eg. BitTorrent
+ *     Low Priority Data (CS1,LE)        - eg. BitTorrent
 
  *     Total 12 traffic classes.
  */
@@ -2391,7 +2395,7 @@ static int cake_config_diffserv8(struct Qdisc *sch)
  *             Video Streaming          (AF4x, AF3x, CS3)
  *             Bog Standard             (CS0 etc.)
  *             High Throughput          (AF1x, TOS2)
- *             Background Traffic       (CS1)
+ *             Background Traffic       (CS1, LE)
  *
  *             Total 8 traffic classes.
  */
@@ -2435,7 +2439,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
  *         Latency Sensitive  (CS7, CS6, EF, VA, CS5, CS4)
  *         Streaming Media    (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
  *         Best Effort        (CS0, AF1x, TOS2, and those not specified)
- *         Background Traffic (CS1)
+ *         Background Traffic (CS1, LE)
  *
  *             Total 4 traffic classes.
  */
@@ -2473,7 +2477,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
 static int cake_config_diffserv3(struct Qdisc *sch)
 {
 /*  Simplified Diffserv structure with 3 tins.
- *             Low Priority            (CS1)
+ *             Low Priority            (CS1, LE)
  *             Best Effort
  *             Latency Sensitive       (TOS4, VA, EF, CS6, CS7)
  */
index 081c11d..8827987 100644 (file)
@@ -1488,7 +1488,8 @@ static void htb_parent_to_leaf_offload(struct Qdisc *sch,
        struct Qdisc *old_q;
 
        /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
-       qdisc_refcount_inc(new_q);
+       if (new_q)
+               qdisc_refcount_inc(new_q);
        old_q = htb_graft_helper(dev_queue, new_q);
        WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
 }
@@ -1675,10 +1676,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
                                          cl->parent->common.classid,
                                          NULL);
                if (q->offload) {
-                       if (new_q) {
+                       if (new_q)
                                htb_set_lockdep_class_child(new_q);
-                               htb_parent_to_leaf_offload(sch, dev_queue, new_q);
-                       }
+                       htb_parent_to_leaf_offload(sch, dev_queue, new_q);
                }
        }
 
index 27e3e7d..4f2c6d2 100644 (file)
@@ -1072,19 +1072,6 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
  *     what to do with it - that's up to the protocol still.
  */
 
-/**
- *     get_net_ns - increment the refcount of the network namespace
- *     @ns: common namespace (net)
- *
- *     Returns the net's common namespace.
- */
-
-struct ns_common *get_net_ns(struct ns_common *ns)
-{
-       return &get_net(container_of(ns, struct net, ns))->ns;
-}
-EXPORT_SYMBOL_GPL(get_net_ns);
-
 static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
        struct socket *sock;
index 76a6f8c..bd9f156 100644 (file)
@@ -50,6 +50,7 @@ static void tls_device_gc_task(struct work_struct *work);
 static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
 static LIST_HEAD(tls_device_gc_list);
 static LIST_HEAD(tls_device_list);
+static LIST_HEAD(tls_device_down_list);
 static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
@@ -680,15 +681,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
        struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
        struct net_device *netdev;
 
-       if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
-               return;
-
        trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
+       rcu_read_lock();
        netdev = READ_ONCE(tls_ctx->netdev);
        if (netdev)
                netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
                                                   TLS_OFFLOAD_CTX_DIR_RX);
-       clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+       rcu_read_unlock();
        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
 }
 
@@ -761,6 +760,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 
        if (tls_ctx->rx_conf != TLS_HW)
                return;
+       if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
+               return;
 
        prot = &tls_ctx->prot_info;
        rx_ctx = tls_offload_ctx_rx(tls_ctx);
@@ -963,6 +964,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
 
        ctx->sw.decrypted |= is_decrypted;
 
+       if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
+               if (likely(is_encrypted || is_decrypted))
+                       return 0;
+
+               /* After tls_device_down disables the offload, the next SKB will
+                * likely have initial fragments decrypted, and final ones not
+                * decrypted. We need to reencrypt that single SKB.
+                */
+               return tls_device_reencrypt(sk, skb);
+       }
+
        /* Return immediately if the record is either entirely plaintext or
         * entirely ciphertext. Otherwise handle reencrypt partially decrypted
         * record.
@@ -1292,6 +1304,26 @@ static int tls_device_down(struct net_device *netdev)
        spin_unlock_irqrestore(&tls_device_lock, flags);
 
        list_for_each_entry_safe(ctx, tmp, &list, list) {
+               /* Stop offloaded TX and switch to the fallback.
+                * tls_is_sk_tx_device_offloaded will return false.
+                */
+               WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
+
+               /* Stop the RX and TX resync.
+                * tls_dev_resync must not be called after tls_dev_del.
+                */
+               WRITE_ONCE(ctx->netdev, NULL);
+
+               /* Start skipping the RX resync logic completely. */
+               set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
+
+               /* Sync with inflight packets. After this point:
+                * TX: no non-encrypted packets will be passed to the driver.
+                * RX: resync requests from the driver will be ignored.
+                */
+               synchronize_net();
+
+               /* Release the offload context on the driver side. */
                if (ctx->tx_conf == TLS_HW)
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_TX);
@@ -1299,15 +1331,21 @@ static int tls_device_down(struct net_device *netdev)
                    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_RX);
-               WRITE_ONCE(ctx->netdev, NULL);
-               smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
-               while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
-                       usleep_range(10, 200);
+
                dev_put(netdev);
-               list_del_init(&ctx->list);
 
-               if (refcount_dec_and_test(&ctx->refcount))
-                       tls_device_free_ctx(ctx);
+               /* Move the context to a separate list for two reasons:
+                * 1. When the context is deallocated, list_del is called.
+                * 2. It's no longer an offloaded context, so we don't want to
+                *    run offload-specific code on this context.
+                */
+               spin_lock_irqsave(&tls_device_lock, flags);
+               list_move_tail(&ctx->list, &tls_device_down_list);
+               spin_unlock_irqrestore(&tls_device_lock, flags);
+
+               /* Device contexts for RX and TX will be freed in on sk_destruct
+                * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+                */
        }
 
        up_write(&device_offload_lock);
index cacf040..e40bedd 100644 (file)
@@ -431,6 +431,13 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
 
+struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
+                                        struct net_device *dev,
+                                        struct sk_buff *skb)
+{
+       return tls_sw_fallback(sk, skb);
+}
+
 struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
 {
        return tls_sw_fallback(skb->sk, skb);
index 47b7c53..fde56ff 100644 (file)
@@ -636,6 +636,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
        mutex_init(&ctx->tx_lock);
        rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
        ctx->sk_proto = READ_ONCE(sk->sk_prot);
+       ctx->sk = sk;
        return ctx;
 }
 
index 5a31307..5d1192c 100644 (file)
@@ -535,12 +535,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
        u->path.mnt = NULL;
        state = sk->sk_state;
        sk->sk_state = TCP_CLOSE;
+
+       skpair = unix_peer(sk);
+       unix_peer(sk) = NULL;
+
        unix_state_unlock(sk);
 
        wake_up_interruptible_all(&u->peer_wait);
 
-       skpair = unix_peer(sk);
-
        if (skpair != NULL) {
                if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
                        unix_state_lock(skpair);
@@ -555,7 +557,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
 
                unix_dgram_peer_wake_disconnect(sk, skpair);
                sock_put(skpair); /* It may now die */
-               unix_peer(sk) = NULL;
        }
 
        /* Try to flush out this socket. Throw out buffers at least */
index 2eee939..af590ae 100644 (file)
@@ -28,7 +28,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
        @$(kecho) "  GEN     $@"
        @(echo '#include "reg.h"'; \
          echo 'const u8 shipped_regdb_certs[] = {'; \
-         cat $^ ; \
+         echo | cat - $^ ; \
          echo '};'; \
          echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
         ) > $@
index 6fbf753..8d0883e 100644 (file)
@@ -1340,6 +1340,11 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
        rdev->devlist_generation++;
        wdev->registered = true;
 
+       if (wdev->netdev &&
+           sysfs_create_link(&wdev->netdev->dev.kobj, &rdev->wiphy.dev.kobj,
+                             "phy80211"))
+               pr_err("failed to add phy80211 symlink to netdev!\n");
+
        nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
 }
 
@@ -1365,14 +1370,6 @@ int cfg80211_register_netdevice(struct net_device *dev)
        if (ret)
                goto out;
 
-       if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
-                             "phy80211")) {
-               pr_err("failed to add phy80211 symlink to netdev!\n");
-               unregister_netdevice(dev);
-               ret = -EINVAL;
-               goto out;
-       }
-
        cfg80211_register_wdev(rdev, wdev);
        ret = 0;
 out:
index 6bdd964..d245968 100644 (file)
@@ -334,6 +334,7 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
                            gfp_t gfp)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+       struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
        struct sk_buff *msg;
        void *hdr;
 
@@ -364,9 +365,20 @@ free_msg:
        nlmsg_free(msg);
 free_request:
        spin_lock_bh(&wdev->pmsr_lock);
-       list_del(&req->list);
+       /*
+        * cfg80211_pmsr_process_abort() may have already moved this request
+        * to the free list, and will free it later. In this case, don't free
+        * it here.
+        */
+       list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
+               if (tmp == req) {
+                       list_del(&req->list);
+                       to_free = req;
+                       break;
+               }
+       }
        spin_unlock_bh(&wdev->pmsr_lock);
-       kfree(req);
+       kfree(to_free);
 }
 EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
 
index 9b959e3..0c3f05c 100644 (file)
@@ -133,6 +133,10 @@ static int wiphy_resume(struct device *dev)
        if (rdev->wiphy.registered && rdev->ops->resume)
                ret = rdev_resume(rdev);
        wiphy_unlock(&rdev->wiphy);
+
+       if (ret)
+               cfg80211_shutdown_all_interfaces(&rdev->wiphy);
+
        rtnl_unlock();
 
        return ret;
index 7ec021a..18dba3d 100644 (file)
@@ -1059,6 +1059,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                case NL80211_IFTYPE_MESH_POINT:
                        /* mesh should be handled? */
                        break;
+               case NL80211_IFTYPE_OCB:
+                       cfg80211_leave_ocb(rdev, dev);
+                       break;
                default:
                        break;
                }
index 44d6566..1816899 100644 (file)
@@ -536,7 +536,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
        if (protocol)
                goto out;
 
-       rc = -ENOBUFS;
+       rc = -ENOMEM;
        if ((sk = x25_alloc_socket(net, kern)) == NULL)
                goto out;
 
index 21dbf63..9ec93d9 100644 (file)
@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
        if (format != DRM_FORMAT_XRGB8888) {
                pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
                        format, DRM_FORMAT_XRGB8888);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        if (width < 100  || width > 10000) {
                pci_err(pdev, "width (%d) out of range\n", width);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        if (height < 100 || height > 10000) {
                pci_err(pdev, "height (%d) out of range\n", height);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
                 width, height);
 
        info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
-       if (!info)
+       if (!info) {
+               ret = -ENOMEM;
                goto err_release_regions;
+       }
        pci_set_drvdata(pdev, info);
        par = info->par;
 
index dd87cea..a7883e4 100644 (file)
@@ -59,7 +59,7 @@ quiet_cmd_ld_ko_o = LD [M]  $@
 quiet_cmd_btf_ko = BTF [M] $@
       cmd_btf_ko =                                                     \
        if [ -f vmlinux ]; then                                         \
-               LLVM_OBJCOPY=$(OBJCOPY) $(PAHOLE) -J --btf_base vmlinux $@; \
+               LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \
        else                                                            \
                printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
        fi;
index f4de4c9..0e0f646 100755 (executable)
@@ -240,7 +240,7 @@ gen_btf()
        fi
 
        info "BTF" ${2}
-       LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${extra_paholeopt} ${1}
+       LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${extra_paholeopt} ${1}
 
        # Create ${2} which contains just .BTF section but no symbols. Add
        # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
index f9b1952..1e9baa5 100644 (file)
@@ -192,15 +192,20 @@ static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
                                 Elf32_Word const *symtab_shndx)
 {
        unsigned long offset;
+       unsigned short shndx = w2(sym->st_shndx);
        int index;
 
-       if (sym->st_shndx != SHN_XINDEX)
-               return w2(sym->st_shndx);
+       if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
+               return shndx;
 
-       offset = (unsigned long)sym - (unsigned long)symtab;
-       index = offset / sizeof(*sym);
+       if (shndx == SHN_XINDEX) {
+               offset = (unsigned long)sym - (unsigned long)symtab;
+               index = offset / sizeof(*sym);
 
-       return w(symtab_shndx[index]);
+               return w(symtab_shndx[index]);
+       }
+
+       return 0;
 }
 
 static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
index 25f57c1..a90e31d 100644 (file)
@@ -17,6 +17,9 @@ MODULE_LICENSE("GPL");
 #define MAX_LED (((SNDRV_CTL_ELEM_ACCESS_MIC_LED - SNDRV_CTL_ELEM_ACCESS_SPK_LED) \
                        >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) + 1)
 
+#define to_led_card_dev(_dev) \
+       container_of(_dev, struct snd_ctl_led_card, dev)
+
 enum snd_ctl_led_mode {
         MODE_FOLLOW_MUTE = 0,
         MODE_FOLLOW_ROUTE,
@@ -371,6 +374,21 @@ static void snd_ctl_led_disconnect(struct snd_card *card)
        snd_ctl_led_refresh();
 }
 
+static void snd_ctl_led_card_release(struct device *dev)
+{
+       struct snd_ctl_led_card *led_card = to_led_card_dev(dev);
+
+       kfree(led_card);
+}
+
+static void snd_ctl_led_release(struct device *dev)
+{
+}
+
+static void snd_ctl_led_dev_release(struct device *dev)
+{
+}
+
 /*
  * sysfs
  */
@@ -663,6 +681,7 @@ static void snd_ctl_led_sysfs_add(struct snd_card *card)
                led_card->number = card->number;
                led_card->led = led;
                device_initialize(&led_card->dev);
+               led_card->dev.release = snd_ctl_led_card_release;
                if (dev_set_name(&led_card->dev, "card%d", card->number) < 0)
                        goto cerr;
                led_card->dev.parent = &led->dev;
@@ -681,7 +700,6 @@ cerr:
                put_device(&led_card->dev);
 cerr2:
                printk(KERN_ERR "snd_ctl_led: unable to add card%d", card->number);
-               kfree(led_card);
        }
 }
 
@@ -700,8 +718,7 @@ static void snd_ctl_led_sysfs_remove(struct snd_card *card)
                snprintf(link_name, sizeof(link_name), "led-%s", led->name);
                sysfs_remove_link(&card->ctl_dev.kobj, link_name);
                sysfs_remove_link(&led_card->dev.kobj, "card");
-               device_del(&led_card->dev);
-               kfree(led_card);
+               device_unregister(&led_card->dev);
                led->cards[card->number] = NULL;
        }
 }
@@ -723,6 +740,7 @@ static int __init snd_ctl_led_init(void)
 
        device_initialize(&snd_ctl_led_dev);
        snd_ctl_led_dev.class = sound_class;
+       snd_ctl_led_dev.release = snd_ctl_led_dev_release;
        dev_set_name(&snd_ctl_led_dev, "ctl-led");
        if (device_add(&snd_ctl_led_dev)) {
                put_device(&snd_ctl_led_dev);
@@ -733,15 +751,16 @@ static int __init snd_ctl_led_init(void)
                INIT_LIST_HEAD(&led->controls);
                device_initialize(&led->dev);
                led->dev.parent = &snd_ctl_led_dev;
+               led->dev.release = snd_ctl_led_release;
                led->dev.groups = snd_ctl_led_dev_attr_groups;
                dev_set_name(&led->dev, led->name);
                if (device_add(&led->dev)) {
                        put_device(&led->dev);
                        for (; group > 0; group--) {
                                led = &snd_ctl_leds[group - 1];
-                               device_del(&led->dev);
+                               device_unregister(&led->dev);
                        }
-                       device_del(&snd_ctl_led_dev);
+                       device_unregister(&snd_ctl_led_dev);
                        return -ENOMEM;
                }
        }
@@ -767,9 +786,9 @@ static void __exit snd_ctl_led_exit(void)
        }
        for (group = 0; group < MAX_LED; group++) {
                led = &snd_ctl_leds[group];
-               device_del(&led->dev);
+               device_unregister(&led->dev);
        }
-       device_del(&snd_ctl_led_dev);
+       device_unregister(&snd_ctl_led_dev);
        snd_ctl_led_clean(NULL);
 }
 
index 1645e41..9863be6 100644 (file)
@@ -297,8 +297,16 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
                return err;
        }
        spin_lock_irq(&tmr->lock);
-       tmr->timeri = t;
+       if (tmr->timeri)
+               err = -EBUSY;
+       else
+               tmr->timeri = t;
        spin_unlock_irq(&tmr->lock);
+       if (err < 0) {
+               snd_timer_close(t);
+               snd_timer_instance_free(t);
+               return err;
+       }
        return 0;
 }
 
index 6898b1a..92b7008 100644 (file)
@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
                return;
        if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
                return;
+       event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
        list_for_each_entry(ts, &ti->slave_active_head, active_list)
                if (ts->ccallback)
-                       ts->ccallback(ts, event + 100, &tstamp, resolution);
+                       ts->ccallback(ts, event, &tstamp, resolution);
 }
 
 /* start/continue a master timer */
index e0faa66..5805c5d 100644 (file)
@@ -804,7 +804,7 @@ static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
 static inline void cancel_stream(struct amdtp_stream *s)
 {
        s->packet_index = -1;
-       if (current_work() == &s->period_work)
+       if (in_interrupt())
                amdtp_stream_pcm_abort(s);
        WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 }
index ab5ff78..d8be146 100644 (file)
@@ -331,6 +331,10 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x51c8,
        },
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51cc,
+       },
 #endif
 
 };
index a31009a..5462f77 100644 (file)
@@ -2917,6 +2917,7 @@ static int hda_codec_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM_SLEEP
 static int hda_codec_pm_prepare(struct device *dev)
 {
+       dev->power.power_state = PMSG_SUSPEND;
        return pm_runtime_suspended(dev);
 }
 
@@ -2924,6 +2925,10 @@ static void hda_codec_pm_complete(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
 
+       /* If no other pm-functions are called between prepare() and complete() */
+       if (dev->power.power_state.event == PM_EVENT_SUSPEND)
+               dev->power.power_state = PMSG_RESUME;
+
        if (pm_runtime_suspended(dev) && (codec->jackpoll_interval ||
            hda_codec_need_resume(codec) || codec->forced_resume))
                pm_request_resume(dev);
index b638fc2..1f8018f 100644 (file)
@@ -3520,6 +3520,7 @@ static int cap_sw_put(struct snd_kcontrol *kcontrol,
 static const struct snd_kcontrol_new cap_sw_temp = {
        .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
        .name = "Capture Switch",
+       .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
        .info = cap_sw_info,
        .get = cap_sw_get,
        .put = cap_sw_put,
index 79ade33..470753b 100644 (file)
@@ -2485,6 +2485,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Alderlake-P */
        { PCI_DEVICE(0x8086, 0x51c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Alderlake-M */
+       { PCI_DEVICE(0x8086, 0x51cc),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index 726507d..8629e84 100644 (file)
@@ -2206,10 +2206,9 @@ static void cs8409_cs42l42_fixups(struct hda_codec *codec,
                break;
        case HDA_FIXUP_ACT_PROBE:
 
-               /* Set initial volume on Bullseye to -26 dB */
-               if (codec->fixup_id == CS8409_BULLSEYE)
-                       snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
-                                       HDA_INPUT, 0, 0xff, 0x19);
+               /* Set initial DMIC volume to -26 dB */
+               snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
+                               HDA_INPUT, 0, 0xff, 0x19);
                snd_hda_gen_add_kctl(&spec->gen,
                        NULL, &cs8409_cs42l42_hp_volume_mixer);
                snd_hda_gen_add_kctl(&spec->gen,
index 61a60c4..ab5113c 100644 (file)
@@ -6568,6 +6568,7 @@ enum {
        ALC285_FIXUP_HP_SPECTRE_X360,
        ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
        ALC623_FIXUP_LENOVO_THINKSTATION_P340,
+       ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8146,6 +8147,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC283_FIXUP_HEADSET_MIC,
        },
+       [ALC255_FIXUP_ACER_HEADPHONE_AND_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x21, 0x03211030 }, /* Change the Headphone location to Left */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8182,6 +8192,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
        SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -8303,12 +8314,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
        SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
@@ -8327,10 +8341,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
        SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
@@ -8736,6 +8752,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
        {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
        {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+       {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
        {}
 };
 #define ALC225_STANDARD_PINS \
index 87f5709..4a50b16 100644 (file)
@@ -2433,13 +2433,18 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
-static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget rt5659_particular_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("LDO2", RT5659_PWR_ANLG_3, RT5659_PWR_LDO2_BIT, 0,
                NULL, 0),
-       SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
-               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
+               0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5659_PWR_VOL,
                RT5659_PWR_MIC_DET_BIT, 0, NULL, 0),
+};
+
+static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+       SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
+               NULL, 0),
        SND_SOC_DAPM_SUPPLY("Mono Vref", RT5659_PWR_ANLG_1,
                RT5659_PWR_VREF3_BIT, 0, NULL, 0),
 
@@ -2464,8 +2469,6 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
                RT5659_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
 
        /* Input Side */
-       SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
-               0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5659_PWR_ANLG_2, RT5659_PWR_MB2_BIT,
                0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5659_PWR_ANLG_2, RT5659_PWR_MB3_BIT,
@@ -3660,10 +3663,23 @@ static int rt5659_set_bias_level(struct snd_soc_component *component,
 
 static int rt5659_probe(struct snd_soc_component *component)
 {
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
        struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
 
        rt5659->component = component;
 
+       switch (rt5659->pdata.jd_src) {
+       case RT5659_JD_HDA_HEADER:
+               break;
+
+       default:
+               snd_soc_dapm_new_controls(dapm,
+                       rt5659_particular_dapm_widgets,
+                       ARRAY_SIZE(rt5659_particular_dapm_widgets));
+               break;
+       }
+
        return 0;
 }
 
index fed80c8..e78ba3b 100644 (file)
@@ -462,7 +462,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
 
        regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
                RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
-       regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
+       regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd142);
+       regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_5, 0x0700, 0x0600);
        regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
                RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
        regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
index 81866ae..55b2a1f 100644 (file)
 #define TAS2562_TDM_CFG0_RAMPRATE_MASK         BIT(5)
 #define TAS2562_TDM_CFG0_RAMPRATE_44_1         BIT(5)
 #define TAS2562_TDM_CFG0_SAMPRATE_MASK         GENMASK(3, 1)
-#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ    0x0
-#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ   0x1
-#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ  0x2
-#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ   0x3
-#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ   0x4
-#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ   0x5
-#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ 0x6
+#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ    (0x0 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ   (0x1 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ  (0x2 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ   (0x3 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ   (0x4 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ   (0x5 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ (0x6 << 1)
 
 #define TAS2562_TDM_CFG2_RIGHT_JUSTIFY BIT(6)
 
index c62bfd1..4f55b31 100644 (file)
@@ -744,6 +744,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
        /* Initialize sound card */
        priv->pdev = pdev;
        priv->card.dev = &pdev->dev;
+       priv->card.owner = THIS_MODULE;
        ret = snd_soc_of_parse_card_name(&priv->card, "model");
        if (ret) {
                snprintf(priv->name, sizeof(priv->name), "%s-audio",
index 28c7497..a6e95db 100644 (file)
@@ -93,8 +93,30 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
                struct snd_soc_dai *dai)
 {
        struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+       struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+       unsigned int id = dai->driver->id;
 
        clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
+       /*
+        * Ensure LRCLK is disabled even in device node validation.
+        * Will not impact if disabled in lpass_cpu_daiops_trigger()
+        * suspend.
+        */
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
+       else
+               regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
+
+       /*
+        * BCLK may not be enabled if lpass_cpu_daiops_prepare is called before
+        * lpass_cpu_daiops_shutdown. It's paired with the clk_enable in
+        * lpass_cpu_daiops_prepare.
+        */
+       if (drvdata->mi2s_was_prepared[dai->driver->id]) {
+               drvdata->mi2s_was_prepared[dai->driver->id] = false;
+               clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
+       }
+
        clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
 }
 
@@ -275,6 +297,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               /*
+                * Ensure lpass BCLK/LRCLK is enabled during
+                * device resume as lpass_cpu_daiops_prepare() is not called
+                * after the device resumes. We don't check mi2s_was_prepared before
+                * enable/disable BCLK in trigger events because:
+                *  1. These trigger events are paired, so the BCLK
+                *     enable_count is balanced.
+                *  2. the BCLK can be shared (ex: headset and headset mic),
+                *     we need to increase the enable_count so that we don't
+                *     turn off the shared BCLK while other devices are using
+                *     it.
+                */
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                        ret = regmap_fields_write(i2sctl->spken, id,
                                                 LPAIF_I2SCTL_SPKEN_ENABLE);
@@ -296,6 +330,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               /*
+                * Ensure lpass BCLK/LRCLK is disabled during
+                * device suspend.
+                */
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                        ret = regmap_fields_write(i2sctl->spken, id,
                                                 LPAIF_I2SCTL_SPKEN_DISABLE);
@@ -315,12 +353,53 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        return ret;
 }
 
+static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
+               struct snd_soc_dai *dai)
+{
+       struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+       struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+       unsigned int id = dai->driver->id;
+       int ret;
+
+       /*
+        * Ensure the lpass BCLK/LRCLK enable bit is set before playback/capture
+        * data flow starts. This allows other codec to have some delay before
+        * the data flow.
+        * (ex: to drop start up pop noise before capture starts).
+        */
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
+       else
+               ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
+
+       if (ret) {
+               dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
+               return ret;
+       }
+
+       /*
+        * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
+        * be called multiple times. It's paired with the clk_disable in
+        * lpass_cpu_daiops_shutdown.
+        */
+       if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
+               ret = clk_enable(drvdata->mi2s_bit_clk[id]);
+               if (ret) {
+                       dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
+                       return ret;
+               }
+               drvdata->mi2s_was_prepared[dai->driver->id] = true;
+       }
+       return 0;
+}
+
 const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
        .set_sysclk     = lpass_cpu_daiops_set_sysclk,
        .startup        = lpass_cpu_daiops_startup,
        .shutdown       = lpass_cpu_daiops_shutdown,
        .hw_params      = lpass_cpu_daiops_hw_params,
        .trigger        = lpass_cpu_daiops_trigger,
+       .prepare        = lpass_cpu_daiops_prepare,
 };
 EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
 
index 83b2e08..7f72214 100644 (file)
@@ -67,6 +67,10 @@ struct lpass_data {
        /* MI2S SD lines to use for playback/capture */
        unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
        unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
+
+       /* The state of MI2S prepare dai_ops was called */
+       bool mi2s_was_prepared[LPASS_MAX_MI2S_PORTS];
+
        int hdmi_port_enable;
 
        /* low-power audio interface (LPAIF) registers */
index 1c0904a..a76974c 100644 (file)
@@ -2225,6 +2225,8 @@ static char *fmt_single_name(struct device *dev, int *id)
                return NULL;
 
        name = devm_kstrdup(dev, devname, GFP_KERNEL);
+       if (!name)
+               return NULL;
 
        /* are we a "%s.%d" name (platform and SPI components) */
        found = strstr(name, dev->driver->name);
index 73076d4..4893a56 100644 (file)
@@ -1901,7 +1901,7 @@ static void stream_caps_new_ver(struct snd_soc_tplg_stream_caps *dest,
  * @src: older version of pcm as a source
  * @pcm: latest version of pcm created from the source
  *
- * Support from vesion 4. User should free the returned pcm manually.
+ * Support from version 4. User should free the returned pcm manually.
  */
 static int pcm_new_ver(struct soc_tplg *tplg,
                       struct snd_soc_tplg_pcm *src,
@@ -2089,7 +2089,7 @@ static void set_link_hw_format(struct snd_soc_dai_link *link,
  * @src: old version of phyical link config as a source
  * @link: latest version of physical link config created from the source
  *
- * Support from vesion 4. User need free the returned link config manually.
+ * Support from version 4. User need free the returned link config manually.
  */
 static int link_new_ver(struct soc_tplg *tplg,
                        struct snd_soc_tplg_link_config *src,
@@ -2400,7 +2400,7 @@ static int soc_tplg_dai_elems_load(struct soc_tplg *tplg,
  * @src: old version of manifest as a source
  * @manifest: latest version of manifest created from the source
  *
- * Support from vesion 4. Users need free the returned manifest manually.
+ * Support from version 4. Users need free the returned manifest manually.
  */
 static int manifest_new_ver(struct soc_tplg *tplg,
                            struct snd_soc_tplg_manifest *src,
index fd26580..c83fb62 100644 (file)
@@ -256,6 +256,7 @@ suspend:
 
        /* reset FW state */
        sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+       sdev->enabled_cores_mask = 0;
 
        return ret;
 }
diff --git a/tools/arch/mips/include/uapi/asm/perf_regs.h b/tools/arch/mips/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..d0f4ecd
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_MIPS_PERF_REGS_H
+#define _ASM_MIPS_PERF_REGS_H
+
+enum perf_event_mips_regs {
+       PERF_REG_MIPS_PC,
+       PERF_REG_MIPS_R1,
+       PERF_REG_MIPS_R2,
+       PERF_REG_MIPS_R3,
+       PERF_REG_MIPS_R4,
+       PERF_REG_MIPS_R5,
+       PERF_REG_MIPS_R6,
+       PERF_REG_MIPS_R7,
+       PERF_REG_MIPS_R8,
+       PERF_REG_MIPS_R9,
+       PERF_REG_MIPS_R10,
+       PERF_REG_MIPS_R11,
+       PERF_REG_MIPS_R12,
+       PERF_REG_MIPS_R13,
+       PERF_REG_MIPS_R14,
+       PERF_REG_MIPS_R15,
+       PERF_REG_MIPS_R16,
+       PERF_REG_MIPS_R17,
+       PERF_REG_MIPS_R18,
+       PERF_REG_MIPS_R19,
+       PERF_REG_MIPS_R20,
+       PERF_REG_MIPS_R21,
+       PERF_REG_MIPS_R22,
+       PERF_REG_MIPS_R23,
+       PERF_REG_MIPS_R24,
+       PERF_REG_MIPS_R25,
+       PERF_REG_MIPS_R26,
+       PERF_REG_MIPS_R27,
+       PERF_REG_MIPS_R28,
+       PERF_REG_MIPS_R29,
+       PERF_REG_MIPS_R30,
+       PERF_REG_MIPS_R31,
+       PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1,
+};
+#endif /* _ASM_MIPS_PERF_REGS_H */
index b7dd944..8f28faf 100644 (file)
 # define DISABLE_PTI           (1 << (X86_FEATURE_PTI & 31))
 #endif
 
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD        0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD         (1 << (X86_FEATURE_ENQCMD & 31))
 
 #ifdef CONFIG_X86_SGX
 # define DISABLE_SGX   0
index 078cbd2..de7f30f 100644 (file)
@@ -4,4 +4,8 @@
 
 #include "../../../../include/linux/bootconfig.h"
 
+#ifndef fallthrough
+# define fallthrough
+#endif
+
 #endif
index 7362bef..6cd6080 100644 (file)
@@ -399,6 +399,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
        }
        /* TODO: Ensure the @path is initramfs/initrd image */
        if (fstat(fd, &stat) < 0) {
+               ret = -errno;
                pr_err("Failed to get the size of %s\n", path);
                goto out;
        }
index 6de5a7f..d2a9420 100644 (file)
@@ -863,8 +863,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
 __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
 #define __NR_mount_setattr 442
 __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
 
 #define __NR_landlock_create_ruleset 444
 __SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
index 7d66876..d1b3270 100644 (file)
@@ -289,6 +289,9 @@ struct sockaddr_in {
 /* Address indicating an error return. */
 #define        INADDR_NONE             ((unsigned long int) 0xffffffff)
 
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define        INADDR_DUMMY            ((unsigned long int) 0xc0000008)
+
 /* Network number for local host loopback. */
 #define        IN_LOOPBACKNET          127
 
index 6061431..e9b619a 100644 (file)
@@ -1094,7 +1094,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                        goto out_put_ctx;
                }
                if (xsk->fd == umem->fd)
-                       umem->rx_ring_setup_done = true;
+                       umem->tx_ring_setup_done = true;
        }
 
        err = xsk_get_mmap_offsets(xsk->fd, &off);
index 24295d3..523aa41 100644 (file)
@@ -747,6 +747,10 @@ int arch_rewrite_retpolines(struct objtool_file *file)
 
        list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
 
+               if (insn->type != INSN_JUMP_DYNAMIC &&
+                   insn->type != INSN_CALL_DYNAMIC)
+                       continue;
+
                if (!strcmp(insn->sec->name, ".text.__x86.indirect_thunk"))
                        continue;
 
index 743c2e9..41bca1d 100644 (file)
@@ -717,7 +717,7 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
 
 struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
 {
-       struct section *symtab;
+       struct section *symtab, *symtab_shndx;
        struct symbol *sym;
        Elf_Data *data;
        Elf_Scn *s;
@@ -769,6 +769,29 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
        symtab->len += data->d_size;
        symtab->changed = true;
 
+       symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+       if (symtab_shndx) {
+               s = elf_getscn(elf->elf, symtab_shndx->idx);
+               if (!s) {
+                       WARN_ELF("elf_getscn");
+                       return NULL;
+               }
+
+               data = elf_newdata(s);
+               if (!data) {
+                       WARN_ELF("elf_newdata");
+                       return NULL;
+               }
+
+               data->d_buf = &sym->sym.st_size; /* conveniently 0 */
+               data->d_size = sizeof(Elf32_Word);
+               data->d_align = 4;
+               data->d_type = ELF_T_WORD;
+
+               symtab_shndx->len += 4;
+               symtab_shndx->changed = true;
+       }
+
        sym->sec = find_section_by_index(elf, 0);
 
        elf_add_symbol(elf, sym);
index a62d098..8dc7cef 100644 (file)
@@ -90,7 +90,6 @@ endif
 ifeq ($(ARCH),mips)
   NO_PERF_REGS := 0
   CFLAGS += -I$(OUTPUT)arch/mips/include/generated
-  CFLAGS += -I../../arch/mips/include/uapi -I../../arch/mips/include/generated/uapi
   LIBUNWIND_LIBS = -lunwind -lunwind-mips
 endif
 
index bc3dd37..71efe65 100644 (file)
@@ -2733,6 +2733,12 @@ int cmd_record(int argc, const char **argv)
                rec->no_buildid = true;
        }
 
+       if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
+               pr_err("Kernel has no cgroup sampling support.\n");
+               err = -EINVAL;
+               goto out_opts;
+       }
+
        if (rec->opts.kcore)
                rec->data.is_dir = true;
 
index dd8ff28..c783558 100755 (executable)
@@ -39,6 +39,7 @@ arch/x86/lib/x86-opcode-map.txt
 arch/x86/tools/gen-insn-attr-x86.awk
 arch/arm/include/uapi/asm/perf_regs.h
 arch/arm64/include/uapi/asm/perf_regs.h
+arch/mips/include/uapi/asm/perf_regs.h
 arch/powerpc/include/uapi/asm/perf_regs.h
 arch/s390/include/uapi/asm/perf_regs.h
 arch/x86/include/uapi/asm/perf_regs.h
index 4a7b8de..8c10955 100644 (file)
@@ -16,7 +16,7 @@ pinned=0
 exclusive=0
 exclude_user=0
 exclude_kernel=0|1
-exclude_hv=0
+exclude_hv=0|1
 exclude_idle=0
 mmap=1
 comm=1
index 6b156dd..2aed20d 100755 (executable)
@@ -11,9 +11,9 @@ compare_number()
        second_num=$2
 
        # upper bound is first_num * 110%
-       upper=$(( $first_num + $first_num / 10 ))
+       upper=$(expr $first_num + $first_num / 10 )
        # lower bound is first_num * 90%
-       lower=$(( $first_num - $first_num / 10 ))
+       lower=$(expr $first_num - $first_num / 10 )
 
        if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
                echo "The difference between $first_num and $second_num are greater than 10%."
index b8fc5c5..0d8e3dc 100644 (file)
@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
                            int __user *usockvec);
 extern int __sys_shutdown_sock(struct socket *sock, int how);
 extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
 #endif /* _LINUX_SOCKET_H */
index 974f10e..5ed674a 100644 (file)
@@ -521,9 +521,10 @@ static int bperf__load(struct evsel *evsel, struct target *target)
 
        evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
        if (evsel->bperf_leader_link_fd < 0 &&
-           bperf_reload_leader_program(evsel, attr_map_fd, &entry))
+           bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
+               err = -1;
                goto out;
-
+       }
        /*
         * The bpf_link holds reference to the leader program, and the
         * leader program holds reference to the maps. Therefore, if
@@ -550,6 +551,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
        /* Step 2: load the follower skeleton */
        evsel->follower_skel = bperf_follower_bpf__open();
        if (!evsel->follower_skel) {
+               err = -1;
                pr_err("Failed to open follower skeleton\n");
                goto out;
        }
index b2f4920..7d2ba84 100644 (file)
@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
        if ((tag == DW_TAG_formal_parameter ||
             tag == DW_TAG_variable) &&
            die_compare_name(die_mem, fvp->name) &&
-       /* Does the DIE have location information or external instance? */
+       /*
+        * Does the DIE have location information or const value
+        * or external instance?
+        */
            (dwarf_attr(die_mem, DW_AT_external, &attr) ||
-            dwarf_attr(die_mem, DW_AT_location, &attr)))
+            dwarf_attr(die_mem, DW_AT_location, &attr) ||
+            dwarf_attr(die_mem, DW_AT_const_value, &attr)))
                return DIE_FIND_CB_END;
        if (dwarf_haspc(die_mem, fvp->addr))
                return DIE_FIND_CB_CONTINUE;
index 1bea5b2..ebc5e9a 100644 (file)
@@ -144,6 +144,7 @@ static void perf_env__purge_bpf(struct perf_env *env)
                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
+               free(node->info_linear);
                free(node);
        }
 
index f81ac69..b1c930e 100644 (file)
@@ -428,6 +428,7 @@ struct evsel *evsel__clone(struct evsel *orig)
        evsel->auto_merge_stats = orig->auto_merge_stats;
        evsel->collect_stat = orig->collect_stat;
        evsel->weak_group = orig->weak_group;
+       evsel->use_config_name = orig->use_config_name;
 
        if (evsel__copy_config_terms(evsel, orig) < 0)
                goto out_err;
index 75cf5db..bdad52a 100644 (file)
@@ -83,8 +83,10 @@ struct evsel {
                bool                    collect_stat;
                bool                    weak_group;
                bool                    bpf_counter;
+               bool                    use_config_name;
                int                     bpf_fd;
                struct bpf_object       *bpf_obj;
+               struct list_head        config_terms;
        };
 
        /*
@@ -116,10 +118,8 @@ struct evsel {
        bool                    merged_stat;
        bool                    reset_group;
        bool                    errored;
-       bool                    use_config_name;
        struct hashmap          *per_pkg_mask;
        struct evsel            *leader;
-       struct list_head        config_terms;
        int                     err;
        int                     cpu_iter;
        struct {
index 3ff4936..da19be7 100644 (file)
@@ -776,10 +776,10 @@ static int machine__process_ksymbol_register(struct machine *machine,
                if (dso) {
                        dso->kernel = DSO_SPACE__KERNEL;
                        map = map__new2(0, dso);
+                       dso__put(dso);
                }
 
                if (!dso || !map) {
-                       dso__put(dso);
                        return -ENOMEM;
                }
 
@@ -792,6 +792,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
                map->start = event->ksymbol.addr;
                map->end = map->start + event->ksymbol.len;
                maps__insert(&machine->kmaps, map);
+               map__put(map);
                dso__set_loaded(dso);
 
                if (is_bpf_image(event->ksymbol.name)) {
index 8336dd8..d3cf2de 100644 (file)
@@ -162,10 +162,10 @@ static bool contains_event(struct evsel **metric_events, int num_events,
        return false;
 }
 
-static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
+static bool evsel_same_pmu_or_none(struct evsel *ev1, struct evsel *ev2)
 {
        if (!ev1->pmu_name || !ev2->pmu_name)
-               return false;
+               return true;
 
        return !strcmp(ev1->pmu_name, ev2->pmu_name);
 }
@@ -288,7 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
                         */
                        if (!has_constraint &&
                            ev->leader != metric_events[i]->leader &&
-                           evsel_same_pmu(ev->leader, metric_events[i]->leader))
+                           evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader))
                                break;
                        if (!strcmp(metric_events[i]->name, ev->name)) {
                                set_bit(ev->idx, evlist_used);
@@ -1073,16 +1073,18 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
 
        ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
        if (ret)
-               return ret;
+               goto out;
 
        ret = resolve_metric(d->metric_no_group,
                                     d->metric_list, NULL, d->ids);
        if (ret)
-               return ret;
+               goto out;
 
        *(d->has_match) = true;
 
-       return *d->ret;
+out:
+       *(d->ret) = ret;
+       return ret;
 }
 
 static int metricgroup__add_metric(const char *metric, bool metric_no_group,
index 829af17..0204116 100644 (file)
@@ -103,6 +103,11 @@ static void perf_probe_build_id(struct evsel *evsel)
        evsel->core.attr.build_id = 1;
 }
 
+static void perf_probe_cgroup(struct evsel *evsel)
+{
+       evsel->core.attr.cgroup = 1;
+}
+
 bool perf_can_sample_identifier(void)
 {
        return perf_probe_api(perf_probe_sample_identifier);
@@ -182,3 +187,8 @@ bool perf_can_record_build_id(void)
 {
        return perf_probe_api(perf_probe_build_id);
 }
+
+bool perf_can_record_cgroup(void)
+{
+       return perf_probe_api(perf_probe_cgroup);
+}
index f12ca55..b104168 100644 (file)
@@ -12,5 +12,6 @@ bool perf_can_record_switch_events(void);
 bool perf_can_record_text_poke_events(void);
 bool perf_can_sample_identifier(void);
 bool perf_can_record_build_id(void);
+bool perf_can_record_cgroup(void);
 
 #endif // __PERF_API_PROBE_H
index 866f2d5..b029c29 100644 (file)
@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
            immediate_value_is_supported()) {
                Dwarf_Sword snum;
 
+               if (!tvar)
+                       return 0;
+
                dwarf_formsdata(&attr, &snum);
                ret = asprintf(&tvar->value, "\\%ld", (long)snum);
 
index 0dbb4f2..86145dd 100644 (file)
@@ -1723,6 +1723,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
        if (event->header.size < hdr_sz || event->header.size > buf_sz)
                return -1;
 
+       buf += hdr_sz;
        rest = event->header.size - hdr_sz;
 
        if (readn(fd, buf, rest) != (ssize_t)rest)
index b759dfd..c588a6b 100644 (file)
@@ -541,7 +541,7 @@ static void uniquify_event_name(struct evsel *counter)
        char *config;
        int ret = 0;
 
-       if (counter->uniquified_name ||
+       if (counter->uniquified_name || counter->use_config_name ||
            !counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
                                           strlen(counter->pmu_name)))
                return;
@@ -555,10 +555,8 @@ static void uniquify_event_name(struct evsel *counter)
                }
        } else {
                if (perf_pmu__has_hybrid()) {
-                       if (!counter->use_config_name) {
-                               ret = asprintf(&new_name, "%s/%s/",
-                                              counter->pmu_name, counter->name);
-                       }
+                       ret = asprintf(&new_name, "%s/%s/",
+                                      counter->pmu_name, counter->name);
                } else {
                        ret = asprintf(&new_name, "%s [%s]",
                                       counter->name, counter->pmu_name);
index 4c56aa8..a733457 100644 (file)
@@ -2412,6 +2412,7 @@ int cleanup_sdt_note_list(struct list_head *sdt_notes)
 
        list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
                list_del_init(&pos->note_list);
+               zfree(&pos->args);
                zfree(&pos->name);
                zfree(&pos->provider);
                free(pos);
index 1512092..3a9e332 100644 (file)
@@ -1147,7 +1147,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                }
        }
 
-       if (test->insn_processed) {
+       if (!unpriv && test->insn_processed) {
                uint32_t insn_processed;
                char *proc;
 
index ca8fdb1..7d7ebee 100644 (file)
@@ -61,6 +61,8 @@
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R1 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 0
 },
index 8a1caf4..e061e87 100644 (file)
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT
 },
 {
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT
 },
 {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+       .result_unpriv = REJECT,
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+       .result_unpriv = REJECT,
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+       .result_unpriv = REJECT,
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+       .result_unpriv = REJECT,
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+       .result_unpriv = REJECT,
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
index 17fe33a..2c8935b 100644 (file)
@@ -8,6 +8,8 @@
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 7,
 },
index bd5cae4..1c857b2 100644 (file)
@@ -87,6 +87,8 @@
        BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
 {
        BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
 {
        BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
 {
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
index 8dcd4e0..11fc68d 100644 (file)
@@ -82,8 +82,8 @@
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
-       .retval_unpriv = 1,
-       .result_unpriv = ACCEPT,
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .retval = 1,
        .result = ACCEPT,
 },
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
-       .result_unpriv = ACCEPT,
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
 {
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
-       .result_unpriv = ACCEPT,
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
index bd436df..111801a 100644 (file)
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R7 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 0,
 },
index 7ae2859..a3e593d 100644 (file)
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
+       .errstr_unpriv = "R2 pointer comparison prohibited",
        .retval = 0,
 },
 {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        // fake-dead code; targeted from branch A to
-       // prevent dead code sanitization
+       // prevent dead code sanitization, rejected
+       // via branch B however
        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
        .retval = 0,
 },
 {
index fcd8e38..3573956 100644 (file)
@@ -43,6 +43,7 @@ enum vm_guest_mode {
        VM_MODE_P40V48_4K,
        VM_MODE_P40V48_64K,
        VM_MODE_PXXV48_4K,      /* For 48bits VA but ANY bits PA */
+       VM_MODE_P47V64_4K,
        NUM_VM_MODES,
 };
 
@@ -60,7 +61,7 @@ enum vm_guest_mode {
 
 #elif defined(__s390x__)
 
-#define VM_MODE_DEFAULT                        VM_MODE_P52V48_4K
+#define VM_MODE_DEFAULT                        VM_MODE_P47V64_4K
 #define MIN_PAGE_SHIFT                 12U
 #define ptes_per_page(page_size)       ((page_size) / 16)
 
@@ -285,10 +286,11 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
                                            uint32_t num_percpu_pages, void *guest_code,
                                            uint32_t vcpuids[]);
 
-/* Like vm_create_default_with_vcpus, but accepts mode as a parameter */
+/* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as a parameter */
 struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
-                                   uint64_t extra_mem_pages, uint32_t num_percpu_pages,
-                                   void *guest_code, uint32_t vcpuids[]);
+                                   uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+                                   uint32_t num_percpu_pages, void *guest_code,
+                                   uint32_t vcpuids[]);
 
 /*
  * Adds a vCPU with reasonable defaults (e.g. a stack)
index 1c4753f..82171f1 100644 (file)
@@ -268,7 +268,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 
        /* Create a VM with enough guest pages */
        guest_num_pages = test_mem_size / guest_page_size;
-       vm = vm_create_with_vcpus(mode, nr_vcpus,
+       vm = vm_create_with_vcpus(mode, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
                                  guest_num_pages, 0, guest_code, NULL);
 
        /* Align down GPA of the testing memslot */
index 28e528c..a2b732c 100644 (file)
@@ -82,7 +82,7 @@ int kvm_check_cap(long cap)
 
        kvm_fd = open_kvm_dev_path_or_exit();
        ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
-       TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
+       TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
                "  rc: %i errno: %i", ret, errno);
 
        close(kvm_fd);
@@ -175,6 +175,7 @@ const char *vm_guest_mode_string(uint32_t i)
                [VM_MODE_P40V48_4K]     = "PA-bits:40,  VA-bits:48,  4K pages",
                [VM_MODE_P40V48_64K]    = "PA-bits:40,  VA-bits:48, 64K pages",
                [VM_MODE_PXXV48_4K]     = "PA-bits:ANY, VA-bits:48,  4K pages",
+               [VM_MODE_P47V64_4K]     = "PA-bits:47,  VA-bits:64,  4K pages",
        };
        _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
                       "Missing new mode strings?");
@@ -192,6 +193,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
        { 40, 48,  0x1000, 12 },
        { 40, 48, 0x10000, 16 },
        {  0,  0,  0x1000, 12 },
+       { 47, 64,  0x1000, 12 },
 };
 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
               "Missing new mode params?");
@@ -277,6 +279,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
                TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
 #endif
                break;
+       case VM_MODE_P47V64_4K:
+               vm->pgtable_levels = 5;
+               break;
        default:
                TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
        }
@@ -308,21 +313,50 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
        return vm;
 }
 
+/*
+ * VM Create with customized parameters
+ *
+ * Input Args:
+ *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
+ *   nr_vcpus - VCPU count
+ *   slot0_mem_pages - Slot0 physical memory size
+ *   extra_mem_pages - Non-slot0 physical memory total size
+ *   num_percpu_pages - Per-cpu physical memory pages
+ *   guest_code - Guest entry point
+ *   vcpuids - VCPU IDs
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Pointer to opaque structure that describes the created VM.
+ *
+ * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K),
+ * with customized slot0 memory size, at least 512 pages currently.
+ * extra_mem_pages is only used to calculate the maximum page table size,
+ * no real memory allocation for non-slot0 memory in this function.
+ */
 struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
-                                   uint64_t extra_mem_pages, uint32_t num_percpu_pages,
-                                   void *guest_code, uint32_t vcpuids[])
+                                   uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+                                   uint32_t num_percpu_pages, void *guest_code,
+                                   uint32_t vcpuids[])
 {
+       uint64_t vcpu_pages, extra_pg_pages, pages;
+       struct kvm_vm *vm;
+       int i;
+
+       /* Force slot0 memory size not small than DEFAULT_GUEST_PHY_PAGES */
+       if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES)
+               slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES;
+
        /* The maximum page table size for a memory region will be when the
         * smallest pages are used. Considering each page contains x page
         * table descriptors, the total extra size for page tables (for extra
         * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
         * than N/x*2.
         */
-       uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
-       uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
-       uint64_t pages = DEFAULT_GUEST_PHY_PAGES + extra_mem_pages + vcpu_pages + extra_pg_pages;
-       struct kvm_vm *vm;
-       int i;
+       vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
+       extra_pg_pages = (slot0_mem_pages + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
+       pages = slot0_mem_pages + vcpu_pages + extra_pg_pages;
 
        TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
                    "nr_vcpus = %d too large for host, max-vcpus = %d",
@@ -354,8 +388,8 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
                                            uint32_t num_percpu_pages, void *guest_code,
                                            uint32_t vcpuids[])
 {
-       return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
-                                   num_percpu_pages, guest_code, vcpuids);
+       return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
+                                   extra_mem_pages, num_percpu_pages, guest_code, vcpuids);
 }
 
 struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
index abf3818..7397ca2 100644 (file)
@@ -69,7 +69,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
        TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
                    "Guest memory size is not guest page size aligned.");
 
-       vm = vm_create_with_vcpus(mode, vcpus,
+       vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
                                  (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
                                  0, guest_code, NULL);
 
index 6ad6c82..af1031f 100644 (file)
@@ -166,75 +166,75 @@ size_t get_def_hugetlb_pagesz(void)
        return 0;
 }
 
+#define ANON_FLAGS     (MAP_PRIVATE | MAP_ANONYMOUS)
+#define ANON_HUGE_FLAGS        (ANON_FLAGS | MAP_HUGETLB)
+
 const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
 {
-       static const int anon_flags = MAP_PRIVATE | MAP_ANONYMOUS;
-       static const int anon_huge_flags = anon_flags | MAP_HUGETLB;
-
        static const struct vm_mem_backing_src_alias aliases[] = {
                [VM_MEM_SRC_ANONYMOUS] = {
                        .name = "anonymous",
-                       .flag = anon_flags,
+                       .flag = ANON_FLAGS,
                },
                [VM_MEM_SRC_ANONYMOUS_THP] = {
                        .name = "anonymous_thp",
-                       .flag = anon_flags,
+                       .flag = ANON_FLAGS,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB] = {
                        .name = "anonymous_hugetlb",
-                       .flag = anon_huge_flags,
+                       .flag = ANON_HUGE_FLAGS,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = {
                        .name = "anonymous_hugetlb_16kb",
-                       .flag = anon_huge_flags | MAP_HUGE_16KB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_16KB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = {
                        .name = "anonymous_hugetlb_64kb",
-                       .flag = anon_huge_flags | MAP_HUGE_64KB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_64KB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = {
                        .name = "anonymous_hugetlb_512kb",
-                       .flag = anon_huge_flags | MAP_HUGE_512KB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_512KB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = {
                        .name = "anonymous_hugetlb_1mb",
-                       .flag = anon_huge_flags | MAP_HUGE_1MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_1MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = {
                        .name = "anonymous_hugetlb_2mb",
-                       .flag = anon_huge_flags | MAP_HUGE_2MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_2MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = {
                        .name = "anonymous_hugetlb_8mb",
-                       .flag = anon_huge_flags | MAP_HUGE_8MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_8MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = {
                        .name = "anonymous_hugetlb_16mb",
-                       .flag = anon_huge_flags | MAP_HUGE_16MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_16MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = {
                        .name = "anonymous_hugetlb_32mb",
-                       .flag = anon_huge_flags | MAP_HUGE_32MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_32MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = {
                        .name = "anonymous_hugetlb_256mb",
-                       .flag = anon_huge_flags | MAP_HUGE_256MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_256MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = {
                        .name = "anonymous_hugetlb_512mb",
-                       .flag = anon_huge_flags | MAP_HUGE_512MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_512MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = {
                        .name = "anonymous_hugetlb_1gb",
-                       .flag = anon_huge_flags | MAP_HUGE_1GB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_1GB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = {
                        .name = "anonymous_hugetlb_2gb",
-                       .flag = anon_huge_flags | MAP_HUGE_2GB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_2GB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = {
                        .name = "anonymous_hugetlb_16gb",
-                       .flag = anon_huge_flags | MAP_HUGE_16GB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_16GB,
                },
                [VM_MEM_SRC_SHMEM] = {
                        .name = "shmem",
index 9307f25..1123965 100644 (file)
@@ -267,7 +267,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
        data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
        TEST_ASSERT(data->hva_slots, "malloc() fail");
 
-       data->vm = vm_create_default(VCPU_ID, 1024, guest_code);
+       data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
 
        pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
                max_mem_slots - 1, data->pages_per_slot, rempages);
index 76d9487..5abe92d 100755 (executable)
@@ -1384,12 +1384,37 @@ ipv4_rt_replace()
        ipv4_rt_replace_mpath
 }
 
+# checks that cached input route on VRF port is deleted
+# when VRF is deleted
+ipv4_local_rt_cache()
+{
+       run_cmd "ip addr add 10.0.0.1/32 dev lo"
+       run_cmd "ip netns add test-ns"
+       run_cmd "ip link add veth-outside type veth peer name veth-inside"
+       run_cmd "ip link add vrf-100 type vrf table 1100"
+       run_cmd "ip link set veth-outside master vrf-100"
+       run_cmd "ip link set veth-inside netns test-ns"
+       run_cmd "ip link set veth-outside up"
+       run_cmd "ip link set vrf-100 up"
+       run_cmd "ip route add 10.1.1.1/32 dev veth-outside table 1100"
+       run_cmd "ip netns exec test-ns ip link set veth-inside up"
+       run_cmd "ip netns exec test-ns ip addr add 10.1.1.1/32 dev veth-inside"
+       run_cmd "ip netns exec test-ns ip route add 10.0.0.1/32 dev veth-inside"
+       run_cmd "ip netns exec test-ns ip route add default via 10.0.0.1"
+       run_cmd "ip netns exec test-ns ping 10.0.0.1 -c 1 -i 1"
+       run_cmd "ip link delete vrf-100"
+
+       # if we do not hang test is a success
+       log_test $? 0 "Cached route removed from VRF port device"
+}
+
 ipv4_route_test()
 {
        route_setup
 
        ipv4_rt_add
        ipv4_rt_replace
+       ipv4_local_rt_cache
 
        route_cleanup
 }
diff --git a/tools/testing/selftests/net/icmp.sh b/tools/testing/selftests/net/icmp.sh
new file mode 100755 (executable)
index 0000000..e4b04cd
--- /dev/null
@@ -0,0 +1,74 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for checking ICMP response with dummy address instead of 0.0.0.0.
+# Sets up two namespaces like:
+# +----------------------+                          +--------------------+
+# | ns1                  |    v4-via-v6 routes:     | ns2                |
+# |                      |                  '       |                    |
+# |             +--------+   -> 172.16.1.0/24 ->    +--------+           |
+# |             | veth0  +--------------------------+  veth0 |           |
+# |             +--------+   <- 172.16.0.0/24 <-    +--------+           |
+# |           172.16.0.1 |                          | 2001:db8:1::2/64   |
+# |     2001:db8:1::2/64 |                          |                    |
+# +----------------------+                          +--------------------+
+#
+# And then tries to ping 172.16.1.1 from ns1. This results in a "net
+# unreachable" message being sent from ns2, but there is no IPv4 address set in
+# that address space, so the kernel should substitute the dummy address
+# 192.0.0.8 defined in RFC7600.
+
+NS1=ns1
+NS2=ns2
+H1_IP=172.16.0.1/32
+H1_IP6=2001:db8:1::1
+RT1=172.16.1.0/24
+PINGADDR=172.16.1.1
+RT2=172.16.0.0/24
+H2_IP6=2001:db8:1::2
+
+TMPFILE=$(mktemp)
+
+cleanup()
+{
+    rm -f "$TMPFILE"
+    ip netns del $NS1
+    ip netns del $NS2
+}
+
+trap cleanup EXIT
+
+# Namespaces
+ip netns add $NS1
+ip netns add $NS2
+
+# Connectivity
+ip -netns $NS1 link add veth0 type veth peer name veth0 netns $NS2
+ip -netns $NS1 link set dev veth0 up
+ip -netns $NS2 link set dev veth0 up
+ip -netns $NS1 addr add $H1_IP dev veth0
+ip -netns $NS1 addr add $H1_IP6/64 dev veth0 nodad
+ip -netns $NS2 addr add $H2_IP6/64 dev veth0 nodad
+ip -netns $NS1 route add $RT1 via inet6 $H2_IP6
+ip -netns $NS2 route add $RT2 via inet6 $H1_IP6
+
+# Make sure ns2 will respond with ICMP unreachable
+ip netns exec $NS2 sysctl -qw net.ipv4.icmp_ratelimit=0 net.ipv4.ip_forward=1
+
+# Run the test - a ping runs in the background, and we capture ICMP responses
+# with tcpdump; -c 1 means it should exit on the first ping, but add a timeout
+# in case something goes wrong
+ip netns exec $NS1 ping -w 3 -i 0.5 $PINGADDR >/dev/null &
+ip netns exec $NS1 timeout 10 tcpdump -tpni veth0 -c 1 'icmp and icmp[icmptype] != icmp-echo' > $TMPFILE 2>/dev/null
+
+# Parse response and check for dummy address
+# tcpdump output looks like:
+# IP 192.0.0.8 > 172.16.0.1: ICMP net 172.16.1.1 unreachable, length 92
+RESP_IP=$(awk '{print $2}' < $TMPFILE)
+if [[ "$RESP_IP" != "192.0.0.8" ]]; then
+    echo "FAIL - got ICMP response from $RESP_IP, should be 192.0.0.8"
+    exit 1
+else
+    echo "OK"
+    exit 0
+fi
index 3c4cb72..2b495dc 100755 (executable)
@@ -197,9 +197,6 @@ ip -net "$ns4" link set ns4eth3 up
 ip -net "$ns4" route add default via 10.0.3.2
 ip -net "$ns4" route add default via dead:beef:3::2
 
-# use TCP syn cookies, even if no flooding was detected.
-ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
-
 set_ethtool_flags() {
        local ns="$1"
        local dev="$2"
@@ -501,6 +498,7 @@ do_transfer()
        local stat_ackrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
        local stat_cookietx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
        local stat_cookierx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+       local stat_ooo_now=$(get_mib_counter "${listener_ns}" "TcpExtTCPOFOQueue")
 
        expect_synrx=$((stat_synrx_last_l))
        expect_ackrx=$((stat_ackrx_last_l))
@@ -518,10 +516,14 @@ do_transfer()
                        "${stat_synrx_now_l}" "${expect_synrx}" 1>&2
                retc=1
        fi
-       if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ]; then
-               printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
-                       "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
-               rets=1
+       if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} -a ${stat_ooo_now} -eq 0 ]; then
+               if [ ${stat_ooo_now} -eq 0 ]; then
+                       printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
+                               "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
+                       rets=1
+               else
+                       printf "[ Note ] fallback due to TCP OoO"
+               fi
        fi
 
        if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
@@ -732,6 +734,14 @@ for sender in $ns1 $ns2 $ns3 $ns4;do
                exit $ret
        fi
 
+       # ns1<->ns2 is not subject to reordering/tc delays. Use it to test
+       # mptcp syncookie support.
+       if [ $sender = $ns1 ]; then
+               ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
+       else
+               ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=1
+       fi
+
        run_tests "$ns2" $sender 10.0.1.2
        run_tests "$ns2" $sender dead:beef:1::2
        run_tests "$ns2" $sender 10.0.2.1
index a8fa641..7f26591 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 readonly BASE="ns-$(mktemp -u XXXXXX)"
index 2fedc07..11d7cdb 100755 (executable)
@@ -18,7 +18,8 @@ ret=0
 
 cleanup() {
        local ns
-       local -r jobs="$(jobs -p)"
+       local jobs
+       readonly jobs="$(jobs -p)"
        [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
        rm -f $STATS
 
@@ -108,7 +109,7 @@ chk_gro() {
 
 if [ ! -f ../bpf/xdp_dummy.o ]; then
        echo "Missing xdp_dummy helper. Build bpf selftest first"
-       exit -1
+       exit 1
 fi
 
 create_ns
index 3171069..cd6430b 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for netfilter selftests
 
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
+TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
        nft_concat_range.sh nft_conntrack_helper.sh \
        nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
new file mode 100755 (executable)
index 0000000..6caf6ac
--- /dev/null
@@ -0,0 +1,221 @@
+#!/bin/bash
+#
+# This tests the fib expression.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+nsrouter="nsrouter-$sfx"
+timeout=4
+
+log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
+
+cleanup()
+{
+       ip netns del ${ns1}
+       ip netns del ${ns2}
+       ip netns del ${nsrouter}
+
+       [ $log_netns -eq 0 ] && sysctl -q net.netfilter.nf_log_all_netns=$log_netns
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+ip netns add ${nsrouter}
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace"
+       exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+dmesg | grep -q ' nft_rpfilter: '
+if [ $? -eq 0 ]; then
+       dmesg -c | grep ' nft_rpfilter: '
+       echo "WARN: a previous test run has failed" 1>&2
+fi
+
+sysctl -q net.netfilter.nf_log_all_netns=1
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+load_ruleset() {
+       local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+       chain prerouting {
+               type filter hook prerouting priority 0; policy accept;
+               fib saddr . iif oif missing counter log prefix "$netns nft_rpfilter: " drop
+       }
+}
+EOF
+}
+
+load_ruleset_count() {
+       local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+       chain prerouting {
+               type filter hook prerouting priority 0; policy accept;
+               ip daddr 1.1.1.1 fib saddr . iif oif missing counter drop
+               ip6 daddr 1c3::c01d fib saddr . iif oif missing counter drop
+       }
+}
+EOF
+}
+
+check_drops() {
+       dmesg | grep -q ' nft_rpfilter: '
+       if [ $? -eq 0 ]; then
+               dmesg | grep ' nft_rpfilter: '
+               echo "FAIL: rpfilter did drop packets"
+               return 1
+       fi
+
+       return 0
+}
+
+check_fib_counter() {
+       local want=$1
+       local ns=$2
+       local address=$3
+
+       line=$(ip netns exec ${ns} nft list table inet filter | grep 'fib saddr . iif' | grep $address | grep "packets $want" )
+       ret=$?
+
+       if [ $ret -ne 0 ];then
+               echo "Netns $ns fib counter doesn't match expected packet count of $want for $address" 1>&2
+               ip netns exec ${ns} nft list table inet filter
+               return 1
+       fi
+
+       if [ $want -gt 0 ]; then
+               echo "PASS: fib expression did drop packets for $address"
+       fi
+
+       return 0
+}
+
+load_ruleset ${nsrouter}
+load_ruleset ${ns1}
+load_ruleset ${ns2}
+
+ip link add veth0 netns ${nsrouter} type veth peer name eth0 netns ${ns1} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+fi
+ip link add veth1 netns ${nsrouter} type veth peer name eth0 netns ${ns2}
+
+ip -net ${nsrouter} link set lo up
+ip -net ${nsrouter} link set veth0 up
+ip -net ${nsrouter} addr add 10.0.1.1/24 dev veth0
+ip -net ${nsrouter} addr add dead:1::1/64 dev veth0
+
+ip -net ${nsrouter} link set veth1 up
+ip -net ${nsrouter} addr add 10.0.2.1/24 dev veth1
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth1
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set eth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set eth0 up
+
+ip -net ${ns1} addr add 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr add dead:1::99/64 dev eth0
+ip -net ${ns1} route add default via 10.0.1.1
+ip -net ${ns1} route add default via dead:1::1
+
+ip -net ${ns2} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns2} addr add dead:2::99/64 dev eth0
+ip -net ${ns2} route add default via 10.0.2.1
+ip -net ${ns2} route add default via dead:2::1
+
+test_ping() {
+  local daddr4=$1
+  local daddr6=$2
+
+  ip netns exec ${ns1} ping -c 1 -q $daddr4 > /dev/null
+  ret=$?
+  if [ $ret -ne 0 ];then
+       check_drops
+       echo "FAIL: ${ns1} cannot reach $daddr4, ret $ret" 1>&2
+       return 1
+  fi
+
+  ip netns exec ${ns1} ping -c 3 -q $daddr6 > /dev/null
+  ret=$?
+  if [ $ret -ne 0 ];then
+       check_drops
+       echo "FAIL: ${ns1} cannot reach $daddr6, ret $ret" 1>&2
+       return 1
+  fi
+
+  return 0
+}
+
+ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+sleep 3
+
+test_ping 10.0.2.1 dead:2::1 || exit 1
+check_drops || exit 1
+
+test_ping 10.0.2.99 dead:2::99 || exit 1
+check_drops || exit 1
+
+echo "PASS: fib expression did not cause unwanted packet drops"
+
+ip netns exec ${nsrouter} nft flush table inet filter
+
+ip -net ${ns1} route del default
+ip -net ${ns1} -6 route del default
+
+ip -net ${ns1} addr del 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr del dead:1::99/64 dev eth0
+
+ip -net ${ns1} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns1} addr add dead:2::99/64 dev eth0
+
+ip -net ${ns1} route add default via 10.0.2.1
+ip -net ${ns1} -6 route add default via dead:2::1
+
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth0
+
+# switch to ruleset that doesn't log, this time
+# its expected that this does drop the packets.
+load_ruleset_count ${nsrouter}
+
+# ns1 has a default route, but nsrouter does not.
+# must not check return value, ping to 1.1.1.1 will
+# fail.
+check_fib_counter 0 ${nsrouter} 1.1.1.1 || exit 1
+check_fib_counter 0 ${nsrouter} 1c3::c01d || exit 1
+
+ip netns exec ${ns1} ping -c 1 -W 1 -q 1.1.1.1 > /dev/null
+check_fib_counter 1 ${nsrouter} 1.1.1.1 || exit 1
+
+sleep 2
+ip netns exec ${ns1} ping -c 3 -q 1c3::c01d > /dev/null
+check_fib_counter 3 ${nsrouter} 1c3::c01d || exit 1
+
+exit 0
index bed4b53..8f3e72e 100644 (file)
@@ -10,6 +10,7 @@
 /proc-self-map-files-002
 /proc-self-syscall
 /proc-self-wchan
+/proc-subset-pid
 /proc-uptime-001
 /proc-uptime-002
 /read
index 7ed7cd9..ebc4ee0 100755 (executable)
@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_prefixlength 0
 ip1 -4 route add default dev wg0 table 51820
 ip1 -4 rule add not fwmark 51820 table 51820
 ip1 -4 rule add table main suppress_prefixlength 0
+n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
 # Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
 n1 ping -W 1 -c 100 -f 192.168.99.7
 n1 ping -W 1 -c 100 -f abab::1111
index 4eecb43..74db83a 100644 (file)
@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y
 CONFIG_NETFILTER_XT_NAT=y
 CONFIG_NETFILTER_XT_MATCH_LENGTH=y
 CONFIG_NETFILTER_XT_MARK=y
-CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_NF_NAT_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_FILTER=y