Merge tag 'regulator-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 27 Jan 2020 19:18:55 +0000 (11:18 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 27 Jan 2020 19:18:55 +0000 (11:18 -0800)
Pull regulator updates from Mark Brown:
 "Hardly anything going on in the core this time around with the
  regulator API and pretty quiet on the driver front:

   - An API for comparing regulators, useful for devices that need to
     check if supply voltages exactly match rather than just nominally
     match (a usage sketch follows this list).

   - Conversion of several DT bindings to YAML format.

   - Conversion of I2C drivers to probe_new().

   - New drivers for Monolithic MPQ7920 and MP8859, and Rohm BD71828"
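
A minimal consumer-side sketch of the new comparison helper mentioned in
the first bullet above. The device context and the supply names
("vmmc"/"vqmmc") are illustrative assumptions, not taken from this pull:

	/* Sketch: check whether two supplies resolve to the same physical
	 * regulator before treating them as a single rail.
	 */
	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static int check_shared_supply(struct device *dev)
	{
		struct regulator *vmmc = devm_regulator_get(dev, "vmmc");
		struct regulator *vqmmc = devm_regulator_get(dev, "vqmmc");

		if (IS_ERR(vmmc) || IS_ERR(vqmmc))
			return -EINVAL;

		/* true only if both handles map to the same regulator device */
		if (regulator_is_equal(vmmc, vqmmc))
			dev_info(dev, "vmmc and vqmmc share one regulator\n");

		return 0;
	}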

* tag 'regulator-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator: (34 commits)
  dt-bindings: regulator: add document bindings for mpq7920
  regulator: core: Fix exported symbols to the exported GPL version
  regulator: mpq7920: Fix incorrect defines
  regulator: vqmmc-ipq4019: Fix platform_no_drv_owner.cocci warnings
  regulator: vctrl-regulator: Avoid deadlock getting and setting the voltage
  regulator fix for "regulator: core: Add regulator_is_equal() helper"
  regulator: core: Add regulator_is_equal() helper
  regulator: mpq7920: Convert to use .probe_new
  regulator: mpq7920: Remove unneeded fields from struct mpq7920_regulator_info
  regulator: vqmmc-ipq4019: Trivial clean up
  regulator: vqmmc-ipq4019: Remove ipq4019_regulator_remove
  regulator: bindings: Drop document bindings for mpq7920
  dt-bindings: Drop entry for Monolithic Power System, MPS
  regulator: bd718x7: Simplify the code by removing struct bd718xx_pmic_inits
  regulator: add IPQ4019 SDHCI VQMMC LDO driver
  regulator: Convert i2c drivers to use .probe_new
  regulator: mpq7920: Check the correct variable in mpq7920_regulator_register()
  regulator: mpq7920: Fix Woverflow warning on conversion
  regulator: mp8859: tidy up white space in probe
  regulator: mpq7920: add mpq7920 regulator driver
  ...

573 files changed:
Documentation/ABI/stable/sysfs-class-tpm
Documentation/ABI/stable/sysfs-driver-dma-idxd [new file with mode: 0644]
Documentation/ABI/stable/sysfs-driver-mlxreg-io
Documentation/ABI/testing/sysfs-platform-asus-wmi
Documentation/arm64/cpu-feature-registers.rst
Documentation/arm64/elf_hwcaps.rst
Documentation/arm64/silicon-errata.rst
Documentation/core-api/xarray.rst
Documentation/devicetree/bindings/dma/fsl-edma.txt
Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
Documentation/devicetree/bindings/dma/jz4780-dma.txt
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
Documentation/devicetree/bindings/dma/ti/k3-udma.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/devicetree/bindings/mmc/renesas,sdhi.txt
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt [deleted file]
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
Documentation/devicetree/bindings/mmc/sdhci-msm.txt
Documentation/devicetree/bindings/mmc/sdhci-omap.txt
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt [deleted file]
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/fsl-fman.txt
Documentation/devicetree/bindings/soc/ti/k3-ringacc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/nuvoton,npcm-pspi.txt
Documentation/devicetree/bindings/spi/spi-stm32.txt [deleted file]
Documentation/devicetree/bindings/spi/spi_atmel.txt
Documentation/devicetree/bindings/spi/st,stm32-spi.yaml [new file with mode: 0644]
Documentation/driver-api/dmaengine/client.rst
Documentation/driver-api/dmaengine/provider.rst
Documentation/hwmon/adm1177.rst [new file with mode: 0644]
Documentation/hwmon/drivetemp.rst [new file with mode: 0644]
Documentation/hwmon/index.rst
Documentation/hwmon/max20730.rst [new file with mode: 0644]
Documentation/hwmon/max31730.rst [new file with mode: 0644]
Documentation/hwmon/pmbus.rst
Documentation/hwmon/ucd9000.rst
Documentation/hwmon/xdpe12284.rst [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arm/boot/dts/am335x-boneblack-common.dtsi
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/kernel/hyp-stub.S
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/Makefile
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/archrandom.h [new file with mode: 0644]
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/checksum.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/daifflags.h
arch/arm64/include/asm/exception.h
arch/arm64/include/asm/hwcap.h
arch/arm64/include/asm/kexec.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/include/asm/linkage.h
arch/arm64/include/asm/lse.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/sections.h
arch/arm64/include/asm/simd.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/uapi/asm/hwcap.h
arch/arm64/kernel/acpi.c
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpu-reset.S
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/kexec_image.c
arch/arm64/kernel/machine_kexec.c
arch/arm64/kernel/machine_kexec_file.c
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/signal32.c
arch/arm64/kernel/ssbd.c
arch/arm64/kernel/syscall.c
arch/arm64/kvm/hyp/entry.S
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/sysreg-sr.c
arch/arm64/kvm/hyp/tlb.c
arch/arm64/kvm/sys_regs.c
arch/arm64/lib/Makefile
arch/arm64/lib/clear_page.S
arch/arm64/lib/clear_user.S
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_page.S
arch/arm64/lib/copy_to_user.S
arch/arm64/lib/crc32.S
arch/arm64/lib/csum.c [new file with mode: 0644]
arch/arm64/lib/memchr.S
arch/arm64/lib/memcmp.S
arch/arm64/lib/memcpy.S
arch/arm64/lib/memmove.S
arch/arm64/lib/memset.S
arch/arm64/lib/strchr.S
arch/arm64/lib/strcmp.S
arch/arm64/lib/strlen.S
arch/arm64/lib/strncmp.S
arch/arm64/lib/strnlen.S
arch/arm64/lib/strrchr.S
arch/arm64/lib/tishift.S
arch/arm64/mm/cache.S
arch/arm64/mm/context.c
arch/arm64/mm/pageattr.c
arch/arm64/mm/proc.S
arch/arm64/xen/hypercall.S
arch/m68k/Kconfig
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/entry.S
arch/m68k/kernel/process.c
arch/m68k/kernel/syscalls/syscall.tbl
arch/powerpc/Kconfig
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/include/asm/xive-regs.h
arch/powerpc/sysdev/xive/common.c
arch/x86/include/asm/intel_pmc_ipc.h
arch/x86/include/asm/intel_scu_ipc.h
arch/x86/include/asm/intel_telemetry.h
arch/x86/include/asm/io.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/microcode_amd.h
arch/x86/kernel/amd_nb.c
arch/x86/kernel/cpu/mce/amd.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/cpu/mce/internal.h
arch/x86/kernel/cpu/mce/therm_throt.c
block/partition-generic.c
drivers/acpi/arm64/iort.c
drivers/atm/firestream.c
drivers/base/regmap/regmap-i2c.c
drivers/base/regmap/regmap.c
drivers/char/tpm/tpm-sysfs.c
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/bcm2835-dma.c
drivers/dma/dma-axi-dmac.c
drivers/dma/dma-jz4780.c
drivers/dma/dmaengine.c
drivers/dma/dmaengine.h
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
drivers/dma/fsl-edma-common.c
drivers/dma/fsl-edma-common.h
drivers/dma/fsl-edma.c
drivers/dma/fsl-qdma.c
drivers/dma/hisi_dma.c [new file with mode: 0644]
drivers/dma/idxd/Makefile [new file with mode: 0644]
drivers/dma/idxd/cdev.c [new file with mode: 0644]
drivers/dma/idxd/device.c [new file with mode: 0644]
drivers/dma/idxd/dma.c [new file with mode: 0644]
drivers/dma/idxd/idxd.h [new file with mode: 0644]
drivers/dma/idxd/init.c [new file with mode: 0644]
drivers/dma/idxd/irq.c [new file with mode: 0644]
drivers/dma/idxd/registers.h [new file with mode: 0644]
drivers/dma/idxd/submit.c [new file with mode: 0644]
drivers/dma/idxd/sysfs.c [new file with mode: 0644]
drivers/dma/imx-sdma.c
drivers/dma/ioat/init.c
drivers/dma/mediatek/mtk-uart-apdma.c
drivers/dma/of-dma.c
drivers/dma/owl-dma.c
drivers/dma/pl330.c
drivers/dma/plx_dma.c [new file with mode: 0644]
drivers/dma/s3c24xx-dma.c
drivers/dma/sf-pdma/sf-pdma.c
drivers/dma/sun4i-dma.c
drivers/dma/ti/Kconfig
drivers/dma/ti/Makefile
drivers/dma/ti/edma.c
drivers/dma/ti/k3-psil-am654.c [new file with mode: 0644]
drivers/dma/ti/k3-psil-j721e.c [new file with mode: 0644]
drivers/dma/ti/k3-psil-priv.h [new file with mode: 0644]
drivers/dma/ti/k3-psil.c [new file with mode: 0644]
drivers/dma/ti/k3-udma-glue.c [new file with mode: 0644]
drivers/dma/ti/k3-udma-private.c [new file with mode: 0644]
drivers/dma/ti/k3-udma.c [new file with mode: 0644]
drivers/dma/ti/k3-udma.h [new file with mode: 0644]
drivers/dma/virt-dma.c
drivers/dma/virt-dma.h
drivers/dma/xilinx/zynqmp_dma.c
drivers/edac/Kconfig
drivers/edac/amd64_edac.c
drivers/edac/amd64_edac.h
drivers/edac/aspeed_edac.c
drivers/edac/i5100_edac.c
drivers/edac/mce_amd.c
drivers/edac/sifive_edac.c
drivers/edac/skx_common.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/i915/gem/i915_gem_busy.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_gem.c
drivers/gpu/drm/panfrost/panfrost_gem.h
drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
drivers/gpu/drm/panfrost/panfrost_job.c
drivers/gpu/drm/panfrost/panfrost_job.h
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/panfrost/panfrost_mmu.h
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hidraw.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/adm1177.c [new file with mode: 0644]
drivers/hwmon/adt7475.c
drivers/hwmon/drivetemp.c [new file with mode: 0644]
drivers/hwmon/hwmon.c
drivers/hwmon/k10temp.c
drivers/hwmon/max31730.c [new file with mode: 0644]
drivers/hwmon/nct7802.c
drivers/hwmon/pmbus/Kconfig
drivers/hwmon/pmbus/Makefile
drivers/hwmon/pmbus/ibm-cffps.c
drivers/hwmon/pmbus/max20730.c [new file with mode: 0644]
drivers/hwmon/pmbus/max20751.c
drivers/hwmon/pmbus/pmbus.c
drivers/hwmon/pmbus/pmbus.h
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwmon/pmbus/pxe1610.c
drivers/hwmon/pmbus/tps53679.c
drivers/hwmon/pmbus/ucd9000.c
drivers/hwmon/pmbus/xdpe12284.c [new file with mode: 0644]
drivers/hwmon/pwm-fan.c
drivers/hwmon/w83627ehf.c
drivers/i3c/master.c
drivers/i3c/master/dw-i3c-master.c
drivers/i3c/master/i3c-master-cdns.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/input/evdev.c
drivers/input/misc/keyspan_remote.c
drivers/input/misc/max77650-onkey.c
drivers/input/misc/pm8xxx-vibrator.c
drivers/input/rmi4/rmi_f54.c
drivers/input/rmi4/rmi_smbus.c
drivers/input/tablet/aiptek.c
drivers/input/tablet/gtco.c
drivers/input/tablet/pegasus_notetaker.c
drivers/input/touchscreen/sun4i-ts.c
drivers/input/touchscreen/sur40.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/intel-iommu.c
drivers/leds/leds-as3645a.c
drivers/leds/leds-gpio.c
drivers/leds/leds-lm3532.c
drivers/leds/leds-max77650.c
drivers/leds/leds-rb532.c
drivers/leds/trigger/ledtrig-pattern.c
drivers/mmc/core/block.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/Kconfig
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/au1xmmc.c
drivers/mmc/host/bcm2835.c
drivers/mmc/host/cavium-thunderx.c
drivers/mmc/host/davinci_mmc.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/jz4740_mmc.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/mmc_spi.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mmci.h
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/mvsdio.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/owl-mmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/renesas_sdhi.h
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/renesas_sdhi_internal_dmac.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-brcmstb.c
drivers/mmc/host/sdhci-cadence.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-milbeaut.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci-sirf.c
drivers/mmc/host/sdhci-spear.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sdhci_am654.c
drivers/mmc/host/sdhci_f_sdh30.c
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/sunxi-mmc.c
drivers/mmc/host/tmio_mmc_core.c
drivers/mmc/host/uniphier-sd.c
drivers/mmc/host/usdhi6rol0.c
drivers/net/can/slcan.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/freescale/fman/fman_memac.c
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
drivers/net/ethernet/natsemi/sonic.c
drivers/net/ethernet/natsemi/sonic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/gtp.c
drivers/net/slip/slip.c
drivers/net/tun.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/wireless/cisco/airo.c
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/marvell/libertas/cfg.c
drivers/net/wireless/mediatek/mt76/airtime.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/pci/quirks.c
drivers/perf/fsl_imx8_ddr_perf.c
drivers/perf/hisilicon/hisi_uncore_pmu.c
drivers/pinctrl/core.c
drivers/pinctrl/intel/pinctrl-sunrisepoint.c
drivers/platform/mellanox/mlxreg-hotplug.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/intel-uncore-frequency.c [new file with mode: 0644]
drivers/platform/x86/intel_atomisp2_pm.c
drivers/platform/x86/intel_mid_powerbtn.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/intel_pmc_core.h
drivers/platform/x86/intel_pmc_ipc.c
drivers/platform/x86/intel_scu_ipc.c
drivers/platform/x86/intel_speed_select_if/isst_if_common.c
drivers/platform/x86/intel_telemetry_debugfs.c
drivers/platform/x86/intel_telemetry_pltdrv.c
drivers/platform/x86/mlx-platform.c
drivers/platform/x86/touchscreen_dmi.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/sd.c
drivers/soc/ti/Kconfig
drivers/soc/ti/Makefile
drivers/soc/ti/k3-ringacc.c [new file with mode: 0644]
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-atmel.c
drivers/spi/spi-bcm-qspi.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bitbang.c
drivers/spi/spi-dw.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-fsl-lpspi.c
drivers/spi/spi-fsl-qspi.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-hisi-sfc-v3xx.c [new file with mode: 0644]
drivers/spi/spi-img-spfi.c
drivers/spi/spi-imx.c
drivers/spi/spi-meson-spicc.c
drivers/spi/spi-mxs.c
drivers/spi/spi-npcm-pspi.c
drivers/spi/spi-oc-tiny.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-qcom-qspi.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-sirf.c
drivers/spi/spi-stm32-qspi.c
drivers/spi/spi-stm32.c
drivers/spi/spi-tegra114.c
drivers/spi/spi-ti-qspi.c
drivers/spi/spi-topcliff-pch.c
drivers/spi/spi-uniphier.c
drivers/spi/spi.c
drivers/target/iscsi/iscsi_target.c
drivers/tee/optee/Kconfig
fs/afs/cell.c
fs/btrfs/dev-replace.c
fs/btrfs/scrub.c
fs/ceph/mds_client.c
fs/io_uring.c
fs/namei.c
fs/readdir.c
fs/reiserfs/xattr.c
include/dt-bindings/dma/x1830-dma.h [new file with mode: 0644]
include/linux/dma/k3-psil.h [new file with mode: 0644]
include/linux/dma/k3-udma-glue.h [new file with mode: 0644]
include/linux/dma/ti-cppi5.h [new file with mode: 0644]
include/linux/dmaengine.h
include/linux/genhd.h
include/linux/gpio/consumer.h
include/linux/hwmon.h
include/linux/mfd/tmio.h
include/linux/mmc/slot-gpio.h
include/linux/netdevice.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/nfnetlink.h
include/linux/pci_ids.h
include/linux/pinctrl/consumer.h
include/linux/platform_data/mlxreg.h
include/linux/platform_data/x86/asus-wmi.h
include/linux/pmbus.h
include/linux/regmap.h
include/linux/soc/ti/k3-ringacc.h [new file with mode: 0644]
include/linux/spi/spi.h
include/linux/spi/spi_oc_tiny.h
include/linux/xarray.h
include/net/netns/nftables.h
include/trace/events/xen.h
include/uapi/asm-generic/mman-common.h
include/uapi/linux/hidraw.h
include/uapi/linux/idxd.h [new file with mode: 0644]
include/uapi/linux/io_uring.h
kernel/kexec.c
kernel/kexec_core.c
kernel/kexec_file.c
kernel/kexec_internal.h
kernel/power/snapshot.c
kernel/trace/trace.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_probe.c
kernel/trace/trace_probe.h
kernel/trace/trace_uprobe.c
lib/Makefile
lib/fdt_addresses.c [new file with mode: 0644]
lib/livepatch/test_klp_shadow_vars.c
lib/strncpy_from_user.c
lib/strnlen_user.c
lib/test_xarray.c
lib/xarray.c
net/atm/proc.c
net/caif/caif_usb.c
net/core/dev.c
net/core/neighbour.c
net/core/rtnetlink.c
net/core/skmsg.c
net/core/utils.c
net/hsr/hsr_main.h
net/ipv4/esp4_offload.c
net/ipv4/fou.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/seg6_local.c
net/netfilter/ipset/ip_set_bitmap_gen.h
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_offload.c
net/netfilter/nfnetlink.c
net/netfilter/nft_osf.c
net/rose/af_rose.c
net/sched/cls_api.c
net/sched/ematch.c
net/xfrm/xfrm_interface.c
samples/livepatch/livepatch-shadow-fix1.c
samples/livepatch/livepatch-shadow-fix2.c
samples/livepatch/livepatch-shadow-mod.c
scripts/Kconfig.include
scripts/recordmcount.c
tools/power/x86/intel-speed-select/isst-config.c
tools/power/x86/intel-speed-select/isst-core.c
tools/power/x86/intel-speed-select/isst-display.c
tools/power/x86/intel-speed-select/isst.h

index c0e2383..58e94e7 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/class/tpm/tpmX/device/
 Date:          April 2005
 KernelVersion: 2.6.12
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The device/ directory under a specific TPM instance exposes
                the properties of that TPM chip
 
@@ -9,7 +9,7 @@ Description:    The device/ directory under a specific TPM instance exposes
 What:          /sys/class/tpm/tpmX/device/active
 Date:          April 2006
 KernelVersion: 2.6.17
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "active" property prints a '1' if the TPM chip is accepting
                commands. An inactive TPM chip still contains all the state of
                an active chip (Storage Root Key, NVRAM, etc), and can be
@@ -21,7 +21,7 @@ Description:  The "active" property prints a '1' if the TPM chip is accepting
 What:          /sys/class/tpm/tpmX/device/cancel
 Date:          June 2005
 KernelVersion: 2.6.13
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "cancel" property allows you to cancel the currently
                pending TPM command. Writing any value to cancel will call the
                TPM vendor specific cancel operation.
@@ -29,7 +29,7 @@ Description:  The "cancel" property allows you to cancel the currently
 What:          /sys/class/tpm/tpmX/device/caps
 Date:          April 2005
 KernelVersion: 2.6.12
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "caps" property contains TPM manufacturer and version info.
 
                Example output:
@@ -46,7 +46,7 @@ Description:  The "caps" property contains TPM manufacturer and version info.
 What:          /sys/class/tpm/tpmX/device/durations
 Date:          March 2011
 KernelVersion: 3.1
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "durations" property shows the 3 vendor-specific values
                used to wait for a short, medium and long TPM command. All
                TPM commands are categorized as short, medium or long in
@@ -69,7 +69,7 @@ Description:  The "durations" property shows the 3 vendor-specific values
 What:          /sys/class/tpm/tpmX/device/enabled
 Date:          April 2006
 KernelVersion: 2.6.17
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "enabled" property prints a '1' if the TPM chip is enabled,
                meaning that it should be visible to the OS. This property
                may be visible but produce a '0' after some operation that
@@ -78,7 +78,7 @@ Description:  The "enabled" property prints a '1' if the TPM chip is enabled,
 What:          /sys/class/tpm/tpmX/device/owned
 Date:          April 2006
 KernelVersion: 2.6.17
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "owned" property produces a '1' if the TPM_TakeOwnership
                ordinal has been executed successfully in the chip. A '0'
                indicates that ownership hasn't been taken.
@@ -86,7 +86,7 @@ Description:  The "owned" property produces a '1' if the TPM_TakeOwnership
 What:          /sys/class/tpm/tpmX/device/pcrs
 Date:          April 2005
 KernelVersion: 2.6.12
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "pcrs" property will dump the current value of all Platform
                Configuration Registers in the TPM. Note that since these
                values may be constantly changing, the output is only valid
@@ -109,7 +109,7 @@ Description:        The "pcrs" property will dump the current value of all Platform
 What:          /sys/class/tpm/tpmX/device/pubek
 Date:          April 2005
 KernelVersion: 2.6.12
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "pubek" property will return the TPM's public endorsement
                key if possible. If the TPM has had ownership established and
                is version 1.2, the pubek will not be available without the
@@ -161,7 +161,7 @@ Description:        The "pubek" property will return the TPM's public endorsement
 What:          /sys/class/tpm/tpmX/device/temp_deactivated
 Date:          April 2006
 KernelVersion: 2.6.17
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "temp_deactivated" property returns a '1' if the chip has
                been temporarily deactivated, usually until the next power
                cycle. Whether a warm boot (reboot) will clear a TPM chip
@@ -170,7 +170,7 @@ Description:        The "temp_deactivated" property returns a '1' if the chip has
 What:          /sys/class/tpm/tpmX/device/timeouts
 Date:          March 2011
 KernelVersion: 3.1
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "timeouts" property shows the 4 vendor-specific values
                for the TPM's interface spec timeouts. The use of these
                timeouts is defined by the TPM interface spec that the chip
@@ -183,3 +183,14 @@ Description:       The "timeouts" property shows the 4 vendor-specific values
                The four timeout values are shown in usecs, with a trailing
                "[original]" or "[adjusted]" depending on whether the values
                were scaled by the driver to be reported in usec from msecs.
+
+What:          /sys/class/tpm/tpmX/tpm_version_major
+Date:          October 2019
+KernelVersion: 5.5
+Contact:       linux-integrity@vger.kernel.org
+Description:   The "tpm_version_major" property shows the TCG spec major version
+               implemented by the TPM device.
+
+               Example output:
+
+               2
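
A small userspace sketch for the new tpm_version_major attribute above.
The tpm0 instance name is an assumption; the path follows the ABI entry:

	#include <stdio.h>

	int main(void)
	{
		char buf[8];
		FILE *f = fopen("/sys/class/tpm/tpm0/tpm_version_major", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("TPM major version: %s", buf);
		fclose(f);
		return 0;
	}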
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
new file mode 100644 (file)
index 0000000..f4be46c
--- /dev/null
@@ -0,0 +1,171 @@
+What:           sys/bus/dsa/devices/dsa<m>/cdev_major
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:   The major number that the character device driver assigned to
+               this device.
+
+What:           sys/bus/dsa/devices/dsa<m>/errors
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The error information for this device.
+
+What:           sys/bus/dsa/devices/dsa<m>/max_batch_size
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The largest number of work descriptors in a batch.
+
+What:           sys/bus/dsa/devices/dsa<m>/max_work_queues_size
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum work queue size supported by this device.
+
+What:           sys/bus/dsa/devices/dsa<m>/max_engines
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum number of engines supported by this device.
+
+What:           sys/bus/dsa/devices/dsa<m>/max_groups
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum number of groups that can be created under this
+               device.
+
+What:           sys/bus/dsa/devices/dsa<m>/max_tokens
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The total number of bandwidth tokens supported by this device.
+               The bandwidth tokens represent resources within the DSA
+               implementation, and these resources are allocated by engines to
+               support operations.
+
+What:           sys/bus/dsa/devices/dsa<m>/max_transfer_size
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The number of bytes to be read from the source address to
+               perform the operation. The maximum transfer size is dependent on
+               the workqueue the descriptor was submitted to.
+
+What:           sys/bus/dsa/devices/dsa<m>/max_work_queues
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum number of work queues that this device supports.
+
+What:           sys/bus/dsa/devices/dsa<m>/numa_node
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The NUMA node number for this device.
+
+What:           sys/bus/dsa/devices/dsa<m>/op_cap
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The operation capability bit mask specifies the operation
+               types supported by this device.
+
+What:           sys/bus/dsa/devices/dsa<m>/state
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The state information of this device. It can be either enabled
+               or disabled.
+
+What:           sys/bus/dsa/devices/dsa<m>/group<m>.<n>
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The assigned group under this device.
+
+What:           sys/bus/dsa/devices/dsa<m>/engine<m>.<n>
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The assigned engine under this device.
+
+What:           sys/bus/dsa/devices/dsa<m>/wq<m>.<n>
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The assigned work queue under this device.
+
+What:           sys/bus/dsa/devices/dsa<m>/configurable
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    Indicates whether this device is configurable.
+
+What:           sys/bus/dsa/devices/dsa<m>/token_limit
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum number of bandwidth tokens that may be in use at
+               one time by operations that access low bandwidth memory in the
+               device.
+
+What:           sys/bus/dsa/devices/wq<m>.<n>/group_id
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The group id that this work queue belongs to.
+
+What:           sys/bus/dsa/devices/wq<m>.<n>/size
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The work queue size for this work queue.
+
+What:           sys/bus/dsa/devices/wq<m>.<n>/type
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The type of this work queue; it can be "kernel" for work
+               queues used in kernel space or "user" for work queues used
+               by applications in user space.
+
+What:           sys/bus/dsa/devices/wq<m>.<n>/cdev_minor
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The minor number assigned to this work queue by the character
+               device driver.
+
+What:           sys/bus/dsa/devices/wq<m>.<n>/mode
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The work queue mode type for this work queue.
+
+What:           sys/bus/dsa/devices/wq<m>.<n>/priority
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The priority value of this work queue; it is a value relative
+               to other work queues in the same group, used to control the
+               quality of service for dispatching work from multiple work
+               queues in the same group.
+
+What:           sys/bus/dsa/devices/wq<m>.<n>/state
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The current state of the work queue.
+
+What:           sys/bus/dsa/devices/wq<m>.<n>/threshold
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The number of entries in this work queue that may be filled
+               via a limited portal.
+
+What:           sys/bus/dsa/devices/engine<m>.<n>/group_id
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The group that this engine belongs to.
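
To make the layout above concrete, a hedged userspace sketch that lists
the work queues of the first DSA device and prints each queue's state.
The device instance ("dsa0", hence the "wq0." prefix) is an assumption:

	#include <dirent.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		DIR *d = opendir("/sys/bus/dsa/devices");
		struct dirent *e;

		if (!d)
			return 1;
		while ((e = readdir(d)) != NULL) {
			char path[300], state[32];
			FILE *f;

			if (strncmp(e->d_name, "wq0.", 4))	/* queues of dsa0 only */
				continue;
			snprintf(path, sizeof(path),
				 "/sys/bus/dsa/devices/%s/state", e->d_name);
			f = fopen(path, "r");
			if (f) {
				if (fgets(state, sizeof(state), f))
					printf("%s: %s", e->d_name, state);
				fclose(f);
			}
		}
		closedir(d);
		return 0;
	}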
index 05601a9..b0d90cc 100644 (file)
@@ -1,5 +1,4 @@
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/asic_health
-
 Date:          June 2018
 KernelVersion: 4.19
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -19,7 +18,6 @@ Description:  These files show with which CPLD versions have been burned
                The files are read only.
 
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/fan_dir
-
 Date:          December 2018
 KernelVersion: 5.0
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -30,7 +28,6 @@ Description:  This file shows the system fans direction:
                The files are read only.
 
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version
-
 Date:          November 2018
 KernelVersion: 5.0
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -40,7 +37,6 @@ Description:  These files show with which CPLD versions have been burned
                The files are read only.
 
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable
-
 Date:          November 2018
 KernelVersion: 5.0
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -108,7 +104,6 @@ What:               /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_pwr_fail
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_from_comex
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_system
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_voltmon_upgrade_fail
-
 Date:          November 2018
 KernelVersion: 5.0
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -130,6 +125,12 @@ Description:       These files show with which CPLD versions have been burned
 
                The files are read only.
 
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_thermal
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_wd
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_from_asic
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_reload_bios
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sff_wd
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_swb_wd
 Date:          June 2019
 KernelVersion: 5.3
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -143,9 +144,65 @@ Description:       These files show the system reset cause, as following:
 
                The files are read only.
 
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_thermal
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_wd
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_from_asic
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_reload_bios
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sff_wd
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_swb_wd
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/config1
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/config2
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   These files show static system topology identification,
+               such as the system's static I2C topology and the number and
+               type of FPGA devices within the system.
+
+               The files are read only.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_ac_pwr_fail
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_platform
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_soc
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sw_pwr_off
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   These files show the system reset causes, as follows: reset
+               due to AC power failure, reset invoked from software by
+               asserting the reset signal through the CPLD, reset caused by
+               a signal asserted by the SoC through an ACPI register, and
+               reset invoked from software by asserting the power-off
+               signal through the CPLD.
+
+               The files are read only.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/pcie_asic_reset_dis
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   This file allows the ASIC to remain up during a PCIe root
+               complex reset, when the attribute is set to 1.
+
+               The file is read/write.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/vpd_wp
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   This file allows overriding the system VPD hardware write
+               protection when the attribute is set to 1.
+
+               The file is read/write.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/voltreg_update_status
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   This file exposes the configuration update status of burnable
+               voltage regulator devices. The status values are as follows:
+               0 - OK; 1 - CRC failure; 2 - I2C failure; 3 - in progress.
+
+               The file is read only.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ufm_version
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   This file exposes the firmware version of burnable voltage
+               regulator devices.
+
+               The file is read only.
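
A userspace sketch decoding the voltreg_update_status values documented
above. The concrete hwmon index (hwmon0) is an assumption; real code
should resolve the hwmon* wildcard:

	#include <stdio.h>

	int main(void)
	{
		static const char * const status[] = {
			"OK", "CRC failure", "I2C failure", "in progress",
		};
		unsigned int v;
		FILE *f = fopen("/sys/devices/platform/mlxplat/mlxreg-io/"
				"hwmon/hwmon0/voltreg_update_status", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%u", &v) == 1 && v <= 3)
			printf("voltage regulator update: %s\n", status[v]);
		fclose(f);
		return 0;
	}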
index 9e99f29..1efac0d 100644 (file)
@@ -46,3 +46,13 @@ Description:
                        * 0 - normal,
                        * 1 - overboost,
                        * 2 - silent
+
+What:          /sys/devices/platform/<platform>/throttle_thermal_policy
+Date:          Dec 2019
+KernelVersion: 5.6
+Contact:       "Leonid Maksymchuk" <leonmaxx@gmail.com>
+Description:
+               Throttle thermal policy mode:
+                       * 0 - default,
+                       * 1 - overboost,
+                       * 2 - silent
index b6e4488..41937a8 100644 (file)
@@ -117,6 +117,8 @@ infrastructure:
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
+     | RNDR                         | [63-60] |    y    |
+     +------------------------------+---------+---------+
      | TS                           | [55-52] |    y    |
      +------------------------------+---------+---------+
      | FHM                          | [51-48] |    y    |
@@ -200,6 +202,12 @@ infrastructure:
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
+     | I8MM                         | [55-52] |    y    |
+     +------------------------------+---------+---------+
+     | DGH                          | [51-48] |    y    |
+     +------------------------------+---------+---------+
+     | BF16                         | [47-44] |    y    |
+     +------------------------------+---------+---------+
      | SB                           | [39-36] |    y    |
      +------------------------------+---------+---------+
      | FRINTTS                      | [35-32] |    y    |
@@ -234,10 +242,18 @@ infrastructure:
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
+     | F64MM                        | [59-56] |    y    |
+     +------------------------------+---------+---------+
+     | F32MM                        | [55-52] |    y    |
+     +------------------------------+---------+---------+
+     | I8MM                         | [47-44] |    y    |
+     +------------------------------+---------+---------+
      | SM4                          | [43-40] |    y    |
      +------------------------------+---------+---------+
      | SHA3                         | [35-32] |    y    |
      +------------------------------+---------+---------+
+     | BF16                         | [23-20] |    y    |
+     +------------------------------+---------+---------+
      | BitPerm                      | [19-16] |    y    |
      +------------------------------+---------+---------+
      | AES                          | [7-4]   |    y    |
index 7fa3d21..7dfb97d 100644 (file)
@@ -204,6 +204,37 @@ HWCAP2_FRINT
 
     Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001.
 
+HWCAP2_SVEI8MM
+
+    Functionality implied by ID_AA64ZFR0_EL1.I8MM == 0b0001.
+
+HWCAP2_SVEF32MM
+
+    Functionality implied by ID_AA64ZFR0_EL1.F32MM == 0b0001.
+
+HWCAP2_SVEF64MM
+
+    Functionality implied by ID_AA64ZFR0_EL1.F64MM == 0b0001.
+
+HWCAP2_SVEBF16
+
+    Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0001.
+
+HWCAP2_I8MM
+
+    Functionality implied by ID_AA64ISAR1_EL1.I8MM == 0b0001.
+
+HWCAP2_BF16
+
+    Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0001.
+
+HWCAP2_DGH
+
+    Functionality implied by ID_AA64ISAR1_EL1.DGH == 0b0001.
+
+HWCAP2_RNG
+
+    Functionality implied by ID_AA64ISAR0_EL1.RNDR == 0b0001.
 
 4. Unused AT_HWCAP bits
 -----------------------
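
A minimal userspace check for one of the new capability bits above,
assuming an arm64 toolchain whose asm/hwcap.h defines HWCAP2_RNG:

	#include <stdio.h>
	#include <sys/auxv.h>
	#include <asm/hwcap.h>

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		printf("RNDR instructions: %s\n",
		       (hwcap2 & HWCAP2_RNG) ? "available" : "unavailable");
		return 0;
	}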
index 99b2545..9120e59 100644 (file)
@@ -88,6 +88,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A55      | #1530923        | ARM64_ERRATUM_1530923       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |
index fcedc53..640934b 100644 (file)
@@ -25,10 +25,6 @@ good performance with large indices.  If your index can be larger than
 ``ULONG_MAX`` then the XArray is not the data type for you.  The most
 important user of the XArray is the page cache.
 
-Each non-``NULL`` entry in the array has three bits associated with
-it called marks.  Each mark may be set or cleared independently of
-the others.  You can iterate over entries which are marked.
-
 Normal pointers may be stored in the XArray directly.  They must be 4-byte
 aligned, which is true for any pointer returned from kmalloc() and
 alloc_page().  It isn't true for arbitrary user-space pointers,
@@ -41,12 +37,11 @@ When you retrieve an entry from the XArray, you can check whether it is
 a value entry by calling xa_is_value(), and convert it back to
 an integer by calling xa_to_value().
 
-Some users want to store tagged pointers instead of using the marks
-described above.  They can call xa_tag_pointer() to create an
-entry with a tag, xa_untag_pointer() to turn a tagged entry
-back into an untagged pointer and xa_pointer_tag() to retrieve
-the tag of an entry.  Tagged pointers use the same bits that are used
-to distinguish value entries from normal pointers, so each user must
+Some users want to tag the pointers they store in the XArray.  You can
+call xa_tag_pointer() to create an entry with a tag, xa_untag_pointer()
+to turn a tagged entry back into an untagged pointer and xa_pointer_tag()
+to retrieve the tag of an entry.  Tagged pointers use the same bits that
+are used to distinguish value entries from normal pointers, so you must
 decide whether to store value entries or tagged pointers in
 any particular XArray.
 
@@ -56,10 +51,9 @@ conflict with value entries or internal entries.
 An unusual feature of the XArray is the ability to create entries which
 occupy a range of indices.  Once stored to, looking up any index in
 the range will return the same entry as looking up any other index in
-the range.  Setting a mark on one index will set it on all of them.
-Storing to any index will store to all of them.  Multi-index entries can
-be explicitly split into smaller entries, or storing ``NULL`` into any
-entry will cause the XArray to forget about the range.
+the range.  Storing to any index will store to all of them.  Multi-index
+entries can be explicitly split into smaller entries, or storing ``NULL``
+into any entry will cause the XArray to forget about the range.
 
 Normal API
 ==========
@@ -87,17 +81,11 @@ If you want to only store a new entry to an index if the current entry
 at that index is ``NULL``, you can use xa_insert() which
 returns ``-EBUSY`` if the entry is not empty.
 
-You can enquire whether a mark is set on an entry by using
-xa_get_mark().  If the entry is not ``NULL``, you can set a mark
-on it by using xa_set_mark() and remove the mark from an entry by
-calling xa_clear_mark().  You can ask whether any entry in the
-XArray has a particular mark set by calling xa_marked().
-
 You can copy entries out of the XArray into a plain array by calling
-xa_extract().  Or you can iterate over the present entries in
-the XArray by calling xa_for_each().  You may prefer to use
-xa_find() or xa_find_after() to move to the next present
-entry in the XArray.
+xa_extract().  Or you can iterate over the present entries in the XArray
+by calling xa_for_each(), xa_for_each_start() or xa_for_each_range().
+You may prefer to use xa_find() or xa_find_after() to move to the next
+present entry in the XArray.
 
 Calling xa_store_range() stores the same entry in a range
 of indices.  If you do this, some of the other operations will behave
@@ -124,6 +112,31 @@ xa_destroy().  If the XArray entries are pointers, you may wish
 to free the entries first.  You can do this by iterating over all present
 entries in the XArray using the xa_for_each() iterator.
 
+Search Marks
+------------
+
+Each entry in the array has three bits associated with it called marks.
+Each mark may be set or cleared independently of the others.  You can
+iterate over marked entries by using the xa_for_each_marked() iterator.
+
+You can enquire whether a mark is set on an entry by using
+xa_get_mark().  If the entry is not ``NULL``, you can set a mark on it
+by using xa_set_mark() and remove the mark from an entry by calling
+xa_clear_mark().  You can ask whether any entry in the XArray has a
+particular mark set by calling xa_marked().  Erasing an entry from the
+XArray causes all marks associated with that entry to be cleared.
+
+Setting or clearing a mark on any index of a multi-index entry will
+affect all indices covered by that entry.  Querying the mark on any
+index will return the same result.
+
+There is no way to iterate over entries which are not marked; the data
+structure does not allow this to be implemented efficiently.  There are
+currently no iterators to search for logical combinations of bits (eg
+iterate over all entries which have both ``XA_MARK_1`` and ``XA_MARK_2``
+set, or iterate over all entries which have ``XA_MARK_0`` or ``XA_MARK_2``
+set).  It would be possible to add these if a user arises.
+
 Allocating XArrays
 ------------------
 
@@ -180,6 +193,8 @@ No lock needed:
 Takes RCU read lock:
  * xa_load()
  * xa_for_each()
+ * xa_for_each_start()
+ * xa_for_each_range()
  * xa_find()
  * xa_find_after()
  * xa_extract()
@@ -419,10 +434,9 @@ you last processed.  If you have interrupts disabled while iterating,
 then it is good manners to pause the iteration and reenable interrupts
 every ``XA_CHECK_SCHED`` entries.
 
-The xas_get_mark(), xas_set_mark() and
-xas_clear_mark() functions require the xa_state cursor to have
-been moved to the appropriate location in the xarray; they will do
-nothing if you have called xas_pause() or xas_set()
+The xas_get_mark(), xas_set_mark() and xas_clear_mark() functions require
+the xa_state cursor to have been moved to the appropriate location in the
+XArray; they will do nothing if you have called xas_pause() or xas_set()
 immediately before.
 
 You can call xas_set_update() to have a callback function
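
A kernel-style sketch of the Search Marks section above: store an entry,
mark it, then walk only the marked entries. The xarray and the payload
value are illustrative:

	#include <linux/printk.h>
	#include <linux/xarray.h>

	static DEFINE_XARRAY(demo_xa);

	static void demo_marks(void)
	{
		unsigned long index;
		void *entry;

		xa_store(&demo_xa, 5, xa_mk_value(42), GFP_KERNEL);
		xa_set_mark(&demo_xa, 5, XA_MARK_0);

		/* visits index 5 only; unmarked entries are skipped */
		xa_for_each_marked(&demo_xa, index, entry, XA_MARK_0)
			pr_info("index %lu is marked\n", index);
	}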
index 29dd3cc..e77b08e 100644 (file)
@@ -10,6 +10,7 @@ Required properties:
 - compatible :
        - "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
        - "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp
+       - "fsl,fsl,ls1028a-edma" for eDMA used similar to that on Vybrid vf610 SoC
 - reg : Specifies base physical address(s) and size of the eDMA registers.
        The 1st region is eDMA control register's address and size.
        The 2nd and the 3rd regions are programmable channel multiplexing
index 9d8bbac..c9e9740 100644 (file)
@@ -10,6 +10,9 @@ Required properties:
       "fsl,imx6q-sdma"
       "fsl,imx7d-sdma"
       "fsl,imx8mq-sdma"
+      "fsl,imx8mm-sdma"
+      "fsl,imx8mn-sdma"
+      "fsl,imx8mp-sdma"
  The -to variants should be preferred since they make it possible to determine
  the correct ROM script addresses needed for the driver to work without
  additional firmware.
index ec89782..3459e77 100644 (file)
@@ -1,4 +1,4 @@
-* Ingenic JZ4780 DMA Controller
+* Ingenic XBurst DMA Controller
 
 Required properties:
 
@@ -8,10 +8,12 @@ Required properties:
   * ingenic,jz4770-dma
   * ingenic,jz4780-dma
   * ingenic,x1000-dma
+  * ingenic,x1830-dma
 - reg: Should contain the DMA channel registers location and length, followed
   by the DMA controller registers location and length.
 - interrupts: Should contain the interrupt specifier of the DMA controller.
-- clocks: Should contain a clock specifier for the JZ4780/X1000 PDMA clock.
+- clocks: Should contain a clock specifier for the JZ4780/X1000/X1830 PDMA
+  clock.
 - #dma-cells: Must be <2>. Number of integer cells in the dmas property of
   DMA clients (see below).
 
index 5551e92..b7f81c6 100644 (file)
@@ -30,6 +30,7 @@ Required Properties:
                - "renesas,dmac-r8a7794" (R-Car E2)
                - "renesas,dmac-r8a7795" (R-Car H3)
                - "renesas,dmac-r8a7796" (R-Car M3-W)
+               - "renesas,dmac-r8a77961" (R-Car M3-W+)
                - "renesas,dmac-r8a77965" (R-Car M3-N)
                - "renesas,dmac-r8a77970" (R-Car V3M)
                - "renesas,dmac-r8a77980" (R-Car V3H)
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
new file mode 100644 (file)
index 0000000..8b5c346
--- /dev/null
@@ -0,0 +1,184 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/ti/k3-udma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments K3 NAVSS Unified DMA Device Tree Bindings
+
+maintainers:
+  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+description: |
+  The UDMA-P is intended to perform similar (but significantly upgraded)
+  functions as the packet-oriented DMA used on previous SoC devices. The UDMA-P
+  module supports the transmission and reception of various packet types.
+  The UDMA-P architecture facilitates the segmentation and reassembly of SoC DMA
+  data structure compliant packets to/from smaller data blocks that are natively
+  compatible with the specific requirements of each connected peripheral.
+  Multiple Tx and Rx channels are provided within the DMA which allow multiple
+  segmentation or reassembly operations to be ongoing. The DMA controller
+  maintains state information for each of the channels which allows packet
+  segmentation and reassembly operations to be time division multiplexed between
+  channels in order to share the underlying DMA hardware. An external DMA
+  scheduler is used to control the ordering and rate at which this multiplexing
+  occurs for Transmit operations. The ordering and rate of Receive operations
+  is indirectly controlled by the order in which blocks are pushed into the DMA
+  on the Rx PSI-L interface.
+
+  The UDMA-P also supports acting as both a UTC and UDMA-C for its internal
+  channels. Channels in the UDMA-P can be configured to be either Packet-Based
+  or Third-Party channels on a channel by channel basis.
+
+  All transfers within NAVSS are done between PSI-L source and destination
+  threads.
+  The peripherals serviced by UDMA can be PSI-L native (sa2ul, cpsw, etc) or
+  legacy, non PSI-L native peripherals. In the latter case a special, small PDMA
+  is tasked to act as a bridge between the PSI-L fabric and the legacy
+  peripheral.
+
+  PDMAs can be configured via UDMAP peer registers to match with the
+  configuration of the legacy peripheral.
+
+allOf:
+  - $ref: "../dma-controller.yaml#"
+
+properties:
+  "#dma-cells":
+    const: 1
+    description: |
+      The cell is the PSI-L thread ID of the remote (to UDMAP) end.
+      Valid ranges for thread ID depends on the data movement direction:
+      for source thread IDs (rx): 0 - 0x7fff
+      for destination thread IDs (tx): 0x8000 - 0xffff
+
+      Please refer to the device documentation for the PSI-L thread map and also
+      the PSI-L peripheral chapter for the correct thread ID.
+
+  compatible:
+    enum:
+      - ti,am654-navss-main-udmap
+      - ti,am654-navss-mcu-udmap
+      - ti,j721e-navss-main-udmap
+      - ti,j721e-navss-mcu-udmap
+
+  reg:
+    maxItems: 3
+
+  reg-names:
+    items:
+      - const: gcfg
+      - const: rchanrt
+      - const: tchanrt
+
+  msi-parent: true
+
+  ti,sci:
+    description: phandle to TI-SCI compatible System controller node
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/phandle
+
+  ti,sci-dev-id:
+    description: TI-SCI device id of UDMAP
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+
+  ti,ringacc:
+    description: phandle to the ring accelerator node
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/phandle
+
+  ti,sci-rm-range-tchan:
+    description: |
+      Array of UDMA tchan resource subtypes for resource allocation for this
+      host
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 1
+    # Should be enough
+    maxItems: 255
+
+  ti,sci-rm-range-rchan:
+    description: |
+      Array of UDMA rchan resource subtypes for resource allocation for this
+      host
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 1
+    # Should be enough
+    maxItems: 255
+
+  ti,sci-rm-range-rflow:
+    description: |
+      Array of UDMA rflow resource subtypes for resource allocation for this
+      host
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 1
+    # Should be enough
+    maxItems: 255
+
+required:
+  - compatible
+  - "#dma-cells"
+  - reg
+  - reg-names
+  - msi-parent
+  - ti,sci
+  - ti,sci-dev-id
+  - ti,ringacc
+  - ti,sci-rm-range-tchan
+  - ti,sci-rm-range-rchan
+  - ti,sci-rm-range-rflow
+
+examples:
+  - |+
+    cbass_main {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        cbass_main_navss: navss@30800000 {
+            compatible = "simple-mfd";
+            #address-cells = <2>;
+            #size-cells = <2>;
+            dma-coherent;
+            dma-ranges;
+            ranges;
+
+            ti,sci-dev-id = <118>;
+
+            main_udmap: dma-controller@31150000 {
+                compatible = "ti,am654-navss-main-udmap";
+                reg = <0x0 0x31150000 0x0 0x100>,
+                      <0x0 0x34000000 0x0 0x100000>,
+                      <0x0 0x35000000 0x0 0x100000>;
+                reg-names = "gcfg", "rchanrt", "tchanrt";
+                #dma-cells = <1>;
+
+                ti,ringacc = <&ringacc>;
+
+                msi-parent = <&inta_main_udmass>;
+
+                ti,sci = <&dmsc>;
+                ti,sci-dev-id = <188>;
+
+                ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
+                                        <0x2>; /* TX_CHAN */
+                ti,sci-rm-range-rchan = <0x4>, /* RX_HCHAN */
+                                        <0x5>; /* RX_CHAN */
+                ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */
+            };
+        };
+
+        mcasp0: mcasp@02B00000 {
+            dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+            dma-names = "tx", "rx";
+        };
+
+        crypto: crypto@4E00000 {
+            compatible = "ti,sa2ul-crypto";
+
+            dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>;
+            dma-names = "tx", "rx1", "rx2";
+        };
+    };
diff --git a/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml b/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
new file mode 100644 (file)
index 0000000..2a98220
--- /dev/null
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/adi,adm1177.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADM1177 Hot Swap Controller and Digital Power Monitor
+
+maintainers:
+  - Michael Hennerich <michael.hennerich@analog.com>
+  - Beniamin Bia <beniamin.bia@analog.com>
+
+description: |
+  Analog Devices ADM1177 Hot Swap Controller and Digital Power Monitor
+  https://www.analog.com/media/en/technical-documentation/data-sheets/ADM1177.pdf
+
+properties:
+  compatible:
+    enum:
+      - adi,adm1177
+
+  reg:
+    maxItems: 1
+
+  avcc-supply:
+    description:
+      Phandle to the Avcc power supply
+
+  shunt-resistor-micro-ohms:
+    description:
+      The value of the current sense resistor in microohms. If not provided,
+      the current reading and overcurrent alert are disabled.
+
+  adi,shutdown-threshold-microamp:
+    description:
+      Specifies the current level at which an overcurrent alert occurs.
+      If not provided, the overcurrent alert is configured to max ADC range
+      based on shunt-resistor-micro-ohms.
+
+  adi,vrange-high-enable:
+    description:
+      Specifies which internal voltage divider is used. If present, a 7:2
+      voltage divider is selected; otherwise a 14:1 voltage divider is used.
+    type: boolean
+
+required:
+  - compatible
+  - reg
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        pwmon@5a {
+                compatible = "adi,adm1177";
+                reg = <0x5a>;
+                shunt-resistor-micro-ohms = <50000>; /* 50 mOhm */
+                adi,shutdown-threshold-microamp = <1059000>; /* 1.059 A */
+                adi,vrange-high-enable;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml b/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
new file mode 100644 (file)
index 0000000..5d42e13
--- /dev/null
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/hwmon/pmbus/ti,ucd90320.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UCD90320 power sequencer
+
+maintainers:
+  - Jim Wright <wrightj@linux.vnet.ibm.com>
+
+description: |
+  The UCD90320 is a 32-rail PMBus/I2C addressable power-supply sequencer and
+  monitor. The 24 integrated ADC channels (AMONx) monitor the power supply
+  voltage, current, and temperature. Of the 84 GPIO pins, 8 can be used as
+  digital monitors (DMONx), 32 to enable the power supply (ENx), 24 for
+  margining (MARx), 16 for logical GPO, and 32 as GPIs for cascading and
+  system functions.
+
+  http://focus.ti.com/lit/ds/symlink/ucd90320.pdf
+
+properties:
+  compatible:
+    enum:
+      - ti,ucd90320
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        ucd90320@11 {
+            compatible = "ti,ucd90320";
+            reg = <0x11>;
+        };
+    };
index 733b64a..ae20741 100644 (file)
@@ -11,28 +11,43 @@ Required properties:
 - compatible: should be one of the following
   - "brcm,bcm7425-sdhci"
   - "brcm,bcm7445-sdhci"
+  - "brcm,bcm7216-sdhci"
 
 Refer to clocks/clock-bindings.txt for generic clock consumer properties.
 
 Example:
 
-       sdhci@f03e0100 {
-               compatible = "brcm,bcm7425-sdhci";
-               reg = <0xf03e0000 0x100>;
-               interrupts = <0x0 0x26 0x0>;
-               sdhci,auto-cmd12;
-               clocks = <&sw_sdio>;
+       sdhci@84b0000 {
                sd-uhs-sdr50;
                sd-uhs-ddr50;
+               sd-uhs-sdr104;
+               sdhci,auto-cmd12;
+               compatible = "brcm,bcm7216-sdhci",
+                          "brcm,bcm7445-sdhci",
+                          "brcm,sdhci-brcmstb";
+               reg = <0x84b0000 0x260 0x84b0300 0x200>;
+               reg-names = "host", "cfg";
+               interrupts = <0x0 0x26 0x4>;
+               interrupt-names = "sdio0_0";
+               clocks = <&scmi_clk 245>;
+               clock-names = "sw_sdio";
        };
 
-       sdhci@f03e0300 {
+       sdhci@84b1000 {
+               mmc-ddr-1_8v;
+               mmc-hs200-1_8v;
+               mmc-hs400-1_8v;
+               mmc-hs400-enhanced-strobe;
+               supports-cqe;
                non-removable;
                bus-width = <0x8>;
-               compatible = "brcm,bcm7425-sdhci";
-               reg = <0xf03e0200 0x100>;
-               interrupts = <0x0 0x27 0x0>;
-               sdhci,auto-cmd12;
-               clocks = <sw_sdio>;
-               mmc-hs200-1_8v;
+               compatible = "brcm,bcm7216-sdhci",
+                          "brcm,bcm7445-sdhci",
+                          "brcm,sdhci-brcmstb";
+               reg = <0x84b1000 0x260 0x84b1300 0x200>;
+               reg-names = "host", "cfg";
+               interrupts = <0x0 0x27 0x4>;
+               interrupt-names = "sdio1_0";
+               clocks = <&scmi_clk 245>;
+               clock-names = "sw_sdio";
        };
index 2fb466c..c93643f 100644 (file)
@@ -21,6 +21,7 @@ Required properties:
               "fsl,imx8mq-usdhc"
               "fsl,imx8mm-usdhc"
               "fsl,imx8mn-usdhc"
+              "fsl,imx8mp-usdhc"
               "fsl,imx8qxp-usdhc"
 
 Optional properties:
index bc08fc4..e6cc478 100644 (file)
@@ -23,7 +23,8 @@ Required properties:
                "renesas,sdhi-r8a7793" - SDHI IP on R8A7793 SoC
                "renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC
                "renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC
-               "renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
+               "renesas,sdhi-r8a7796" - SDHI IP on R8A77960 SoC
+               "renesas,sdhi-r8a77961" - SDHI IP on R8A77961 SoC
                "renesas,sdhi-r8a77965" - SDHI IP on R8A77965 SoC
                "renesas,sdhi-r8a77970" - SDHI IP on R8A77970 SoC
                "renesas,sdhi-r8a77980" - SDHI IP on R8A77980 SoC
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
deleted file mode 100644 (file)
index 6f629b1..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-* Rockchip specific extensions to the Synopsys Designware Mobile
-  Storage Host Controller
-
-The Synopsys designware mobile storage host controller is used to interface
-a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsys dw mshc controller properties described
-by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsys Designware Mobile Storage Host Controller.
-
-Required Properties:
-
-* compatible: should be
-       - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
-                                                       before RK3288
-       - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
-       - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
-       - "rockchip,px30-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip PX30
-       - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
-       - "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK322x
-       - "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3328
-       - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
-       - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
-
-Optional Properties:
-* clocks: from common clock binding: if ciu-drive and ciu-sample are
-  specified in clock-names, should contain handles to these clocks.
-
-* clock-names: Apart from the clock-names described in synopsys-dw-mshc.txt
-  two more clocks "ciu-drive" and "ciu-sample" are supported. They are used
-  to control the clock phases, "ciu-sample" is required for tuning high-
-  speed modes.
-
-* rockchip,default-sample-phase: The default phase to set ciu-sample at
-  probing, low speeds or in case where all phases work at tuning time.
-  If not specified 0 deg will be used.
-
-* rockchip,desired-num-phases: The desired number of times that the host
-  execute tuning when needed. If not specified, the host will do tuning
-  for 360 times, namely tuning for each degree.
-
-Example:
-
-       rkdwmmc0@12200000 {
-               compatible = "rockchip,rk3288-dw-mshc";
-               reg = <0x12200000 0x1000>;
-               interrupts = <0 75 0>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-       };
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
new file mode 100644 (file)
index 0000000..89c3edd
--- /dev/null
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/rockchip-dw-mshc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip designware mobile storage host controller device tree bindings
+
+description:
+  Rockchip uses the Synopsys designware mobile storage host controller
+  to interface a SoC with storage media such as eMMC or SD/MMC cards.
+  This file documents the properties of the core Synopsys dw mshc controller
+  that are not already covered by synopsys-dw-mshc-common.yaml, together with
+  the Rockchip-specific extensions.
+
+allOf:
+  - $ref: "synopsys-dw-mshc-common.yaml#"
+
+maintainers:
+  - Heiko Stuebner <heiko@sntech.de>
+
+# Everything else is described in the common file
+properties:
+  compatible:
+    oneOf:
+      # for Rockchip RK2928 and before RK3288
+      - const: rockchip,rk2928-dw-mshc
+      # for Rockchip RK3288
+      - const: rockchip,rk3288-dw-mshc
+      - items:
+          - enum:
+            # for Rockchip PX30
+            - rockchip,px30-dw-mshc
+            # for Rockchip RK3036
+            - rockchip,rk3036-dw-mshc
+            # for Rockchip RK322x
+            - rockchip,rk3228-dw-mshc
+            # for Rockchip RK3308
+            - rockchip,rk3308-dw-mshc
+            # for Rockchip RK3328
+            - rockchip,rk3328-dw-mshc
+            # for Rockchip RK3368
+            - rockchip,rk3368-dw-mshc
+            # for Rockchip RK3399
+            - rockchip,rk3399-dw-mshc
+            # for Rockchip RV1108
+            - rockchip,rv1108-dw-mshc
+          - const: rockchip,rk3288-dw-mshc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 2
+    maxItems: 4
+    description:
+      Handle to "biu" and "ciu" clocks for the bus interface unit clock and
+      the card interface unit clock. If "ciu-drive" and "ciu-sample" are
+      specified in clock-names, it should also contain
+      handles to these clocks.
+
+  clock-names:
+    minItems: 2
+    items:
+      - const: biu
+      - const: ciu
+      - const: ciu-drive
+      - const: ciu-sample
+    description:
+      Apart from the clock-names "biu" and "ciu" two more clocks
+      "ciu-drive" and "ciu-sample" are supported. They are used
+      to control the clock phases, "ciu-sample" is required for tuning
+      high speed modes.
+
+  rockchip,default-sample-phase:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+    minimum: 0
+    maximum: 360
+    default: 0
+    description:
+      The default phase to set "ciu-sample" to at probe time, at low speeds,
+      or in case all phases work at tuning time.
+      If not specified, 0 degrees is used.
+
+  rockchip,desired-num-phases:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+    minimum: 0
+    maximum: 360
+    default: 360
+    description:
+      The desired number of times that the host executes tuning when needed.
+      If not specified, the host will tune 360 times,
+      i.e. once for each degree.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+examples:
+  - |
+    #include <dt-bindings/clock/rk3288-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    sdmmc: mmc@ff0c0000 {
+      compatible = "rockchip,rk3288-dw-mshc";
+      reg = <0x0 0xff0c0000 0x0 0x4000>;
+      interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+               <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+      clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+      resets = <&cru SRST_MMC0>;
+      reset-names = "reset";
+      fifo-depth = <0x100>;
+      max-frequency = <150000000>;
+    };
+
+...
index 503c6db..69edfd4 100644 (file)
@@ -5,11 +5,16 @@ Documentation/devicetree/bindings/mmc/mmc.txt and the properties used by the
 sdhci-of-at91 driver.
 
 Required properties:
-- compatible:          Must be "atmel,sama5d2-sdhci".
+- compatible:          Must be "atmel,sama5d2-sdhci" or "microchip,sam9x60-sdhci".
 - clocks:              Phandlers to the clocks.
-- clock-names:         Must be "hclock", "multclk", "baseclk";
+- clock-names:         Must be "hclock", "multclk", "baseclk" for
+                       "atmel,sama5d2-sdhci".
+                       Must be "hclock", "multclk" for "microchip,sam9x60-sdhci".
 
 Optional properties:
+- assigned-clocks:     The same as "multclk".
+- assigned-clock-rates: The rate of "multclk", so as not to rely on the
+                       gck configuration set by previous components.
 - microchip,sdcal-inverted: when present, polarity on the SDCAL SoC pin is
   inverted. The default polarity for this signal is described in the datasheet.
   For instance on SAMA5D2, the pin is usually tied to the GND with a resistor
@@ -17,10 +22,12 @@ Optional properties:
 
 Example:
 
-sdmmc0: sdio-host@a0000000 {
+mmc0: sdio-host@a0000000 {
        compatible = "atmel,sama5d2-sdhci";
        reg = <0xa0000000 0x300>;
        interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>;
        clocks = <&sdmmc0_hclk>, <&sdmmc0_gclk>, <&main>;
        clock-names = "hclock", "multclk", "baseclk";
+       assigned-clocks = <&sdmmc0_gclk>;
+       assigned-clock-rates = <480000000>;
 };
index da4edb1..7ee639b 100644 (file)
@@ -19,6 +19,7 @@ Required properties:
                "qcom,msm8996-sdhci", "qcom,sdhci-msm-v4"
                "qcom,sdm845-sdhci", "qcom,sdhci-msm-v5"
                "qcom,qcs404-sdhci", "qcom,sdhci-msm-v5"
+               "qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
        NOTE that some old device tree files may be floating around that only
        have the string "qcom,sdhci-msm-v4" without the SoC compatible string
        but doing that should be considered a deprecated practice.
index 72c4dec..aeb615e 100644 (file)
@@ -7,6 +7,8 @@ For UHS devices which require tuning, the device tree should have a "cpu_thermal
 Required properties:
 - compatible: Should be "ti,dra7-sdhci" for DRA7 and DRA72 controllers
              Should be "ti,k2g-sdhci" for K2G
+             Should be "ti,am335-sdhci" for am335x controllers
+             Should be "ti,am437-sdhci" for am437x controllers
 - ti,hwmods: Must be "mmc<n>", <n> is controller instance starting 1
             (Not required for K2G).
 - pinctrl-names: Should be subset of "default", "hs", "sdr12", "sdr25", "sdr50",
@@ -15,6 +17,13 @@ Required properties:
                 "hs200_1_8v",
 - pinctrl-<n> : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
 
+Optional properties:
+- dmas:                List of DMA specifiers with the controller specific format as described
+               in the generic DMA client binding. A tx and rx specifier is required.
+- dma-names:   List of DMA request names. These strings correspond 1:1 with the
+               DMA specifiers listed in dmas. The strings must be "tx"
+               and "rx" for the TX and RX DMA requests, respectively.
+
 Example:
        mmc1: mmc@4809c000 {
                compatible = "ti,dra7-sdhci";
@@ -22,4 +31,6 @@ Example:
                ti,hwmods = "mmc1";
                bus-width = <4>;
                vmmc-supply = <&vmmc>; /* phandle to regulator node */
+               dmas = <&sdma 61 &sdma 62>;
+               dma-names = "tx", "rx";
        };
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml
new file mode 100644 (file)
index 0000000..890d47a
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/synopsys-dw-mshc-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys Designware Mobile Storage Host Controller Common Properties
+
+allOf:
+  - $ref: "mmc-controller.yaml#"
+
+maintainers:
+  - Ulf Hansson <ulf.hansson@linaro.org>
+
+# Everything else is described in the common file
+properties:
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: reset
+
+  clock-frequency:
+    description:
+      Should be the frequency (in Hz) of the ciu clock.  If this
+      is specified and the ciu clock is specified then we'll try to set the ciu
+      clock to this at probe time.
+
+  fifo-depth:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      The maximum size of the tx/rx fifo's. If this property is not
+      specified, the default value of the fifo size is determined from the
+      controller registers.
+
+  card-detect-delay:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - default: 0
+    description:
+      Delay in milli-seconds before detecting card after card
+      insert event. The default value is 0.
+
+  data-addr:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      Override the FIFO address with the value provided by DT. The default
+      FIFO register offset is assumed to be 0x100 (version < 0x240A) or
+      0x200 (version >= 0x240A) by the driver. If the controller does not
+      follow this rule, use this property to set the FIFO address.
+
+  fifo-watermark-aligned:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      In PIO mode a data-done irq is expected if the data length is less
+      than the watermark. On some SoCs, however, the FIFO watermark must be
+      aligned with the data length so that the TX/RX irq can be generated
+      together with the data-done irq. Add this quirk to mark this
+      requirement and force the FIFO watermark setting accordingly.
+
+  dmas:
+    maxItems: 1
+
+  dma-names:
+    const: rx-tx
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
deleted file mode 100644 (file)
index 7e5e427..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-* Synopsys Designware Mobile Storage Host Controller
-
-The Synopsys designware mobile storage host controller is used to interface
-a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsys Designware Mobile Storage Host Controller.
-
-Required Properties:
-
-* compatible: should be
-       - snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
-* #address-cells: should be 1.
-* #size-cells: should be 0.
-
-# Slots (DEPRECATED): The slot specific information are contained within
-  child-nodes with each child-node representing a supported slot. There should
-  be atleast one child node representing a card slot. The name of the child node
-  representing the slot is recommended to be slot@n where n is the unique number
-  of the slot connected to the controller. The following are optional properties
-  which can be included in the slot child node.
-
-       * reg: specifies the physical slot number. The valid values of this
-         property is 0 to (num-slots -1), where num-slots is the value
-         specified by the num-slots property.
-
-       * bus-width: as documented in mmc core bindings.
-
-       * wp-gpios: specifies the write protect gpio line. The format of the
-         gpio specifier depends on the gpio controller. If a GPIO is not used
-         for write-protect, this property is optional.
-
-       * disable-wp: If the wp-gpios property isn't present then (by default)
-         we'd assume that the write protect is hooked up directly to the
-         controller's special purpose write protect line (accessible via
-         the WRTPRT register).  However, it's possible that we simply don't
-         want write protect.  In that case specify 'disable-wp'.
-         NOTE: This property is not required for slots known to always
-         connect to eMMC or SDIO cards.
-
-Optional properties:
-
-* resets: phandle + reset specifier pair, intended to represent hardware
-  reset signal present internally in some host controller IC designs.
-  See Documentation/devicetree/bindings/reset/reset.txt for details.
-
-* reset-names: request name for using "resets" property. Must be "reset".
-       (It will be used together with "resets" property.)
-
-* clocks: from common clock binding: handle to biu and ciu clocks for the
-  bus interface unit clock and the card interface unit clock.
-
-* clock-names: from common clock binding: Shall be "biu" and "ciu".
-  If the biu clock is missing we'll simply skip enabling it.  If the
-  ciu clock is missing we'll just assume that the clock is running at
-  clock-frequency.  It is an error to omit both the ciu clock and the
-  clock-frequency.
-
-* clock-frequency: should be the frequency (in Hz) of the ciu clock.  If this
-  is specified and the ciu clock is specified then we'll try to set the ciu
-  clock to this at probe time.
-
-* fifo-depth: The maximum size of the tx/rx fifo's. If this property is not
-  specified, the default value of the fifo size is determined from the
-  controller registers.
-
-* card-detect-delay: Delay in milli-seconds before detecting card after card
-  insert event. The default value is 0.
-
-* data-addr: Override fifo address with value provided by DT. The default FIFO reg
-  offset is assumed as 0x100 (version < 0x240A) and 0x200(version >= 0x240A) by
-  driver. If the controller does not follow this rule, please use this property
-  to set fifo address in device tree.
-
-* fifo-watermark-aligned: Data done irq is expected if data length is less than
-  watermark in PIO mode. But fifo watermark is requested to be aligned with data
-  length in some SoC so that TX/RX irq can be generated with data done irq. Add this
-  watermark quirk to mark this requirement and force fifo watermark setting
-  accordingly.
-
-* vmmc-supply: The phandle to the regulator to use for vmmc.  If this is
-  specified we'll defer probe until we can find this regulator.
-
-* dmas: List of DMA specifiers with the controller specific format as described
-  in the generic DMA client binding. Refer to dma.txt for details.
-
-* dma-names: request names for generic DMA client binding. Must be "rx-tx".
-  Refer to dma.txt for details.
-
-Aliases:
-
-- All the MSHC controller nodes should be represented in the aliases node using
-  the following format 'mshc{n}' where n is a unique number for the alias.
-
-Example:
-
-The MSHC controller node can be split into two portions, SoC specific and
-board specific portions as listed below.
-
-       dwmmc0@12200000 {
-               compatible = "snps,dw-mshc";
-               clocks = <&clock 351>, <&clock 132>;
-               clock-names = "biu", "ciu";
-               reg = <0x12200000 0x1000>;
-               interrupts = <0 75 0>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-               data-addr = <0x200>;
-               fifo-watermark-aligned;
-               resets = <&rst 20>;
-               reset-names = "reset";
-       };
-
-[board specific internal DMA resources]
-
-       dwmmc0@12200000 {
-               clock-frequency = <400000000>;
-               clock-freq-min-max = <400000 200000000>;
-               broken-cd;
-               fifo-depth = <0x80>;
-               card-detect-delay = <200>;
-               vmmc-supply = <&buck8>;
-               bus-width = <8>;
-               cap-mmc-highspeed;
-               cap-sd-highspeed;
-       };
-
-[board specific generic DMA request binding]
-
-       dwmmc0@12200000 {
-               clock-frequency = <400000000>;
-               clock-freq-min-max = <400000 200000000>;
-               broken-cd;
-               fifo-depth = <0x80>;
-               card-detect-delay = <200>;
-               vmmc-supply = <&buck8>;
-               bus-width = <8>;
-               cap-mmc-highspeed;
-               cap-sd-highspeed;
-               dmas = <&pdma 12>;
-               dma-names = "rx-tx";
-       };
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml
new file mode 100644 (file)
index 0000000..05f9f36
--- /dev/null
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/synopsys-dw-mshc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys Designware Mobile Storage Host Controller Binding
+
+allOf:
+  - $ref: "synopsys-dw-mshc-common.yaml#"
+
+maintainers:
+  - Ulf Hansson <ulf.hansson@linaro.org>
+
+# Everything else is described in the common file
+properties:
+  compatible:
+    const: snps,dw-mshc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 2
+    maxItems: 2
+    description:
+      Handle to "biu" and "ciu" clocks for the
+      bus interface unit clock and the card interface unit clock.
+
+  clock-names:
+    items:
+      - const: biu
+      - const: ciu
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+examples:
+  - |
+    mmc@12200000 {
+      compatible = "snps,dw-mshc";
+      reg = <0x12200000 0x1000>;
+      interrupts = <0 75 0>;
+      clocks = <&clock 351>, <&clock 132>;
+      clock-names = "biu", "ciu";
+      dmas = <&pdma 12>;
+      dma-names = "rx-tx";
+      resets = <&rst 20>;
+      reset-names = "reset";
+      vmmc-supply = <&buck8>;
+      #address-cells = <1>;
+      #size-cells = <0>;
+      broken-cd;
+      bus-width = <8>;
+      cap-mmc-highspeed;
+      cap-sd-highspeed;
+      card-detect-delay = <200>;
+      clock-freq-min-max = <400000 200000000>;
+      clock-frequency = <400000000>;
+      data-addr = <0x200>;
+      fifo-depth = <0x80>;
+      fifo-watermark-aligned;
+    };
index 299c0dc..250f8d8 100644 (file)
@@ -403,6 +403,19 @@ PROPERTIES
                The settings and programming routines for internal/external
                MDIO are different. Must be included for internal MDIO.
 
+- fsl,erratum-a011043
+               Usage: optional
+               Value type: <boolean>
+               Definition: Indicates the presence of the A011043 erratum
+               describing that the MDIO_CFG[MDIO_RD_ER] bit may be falsely
+               set when reading internal PCS registers. MDIO reads to
+               internal PCS registers may result in having the
+               MDIO_CFG[MDIO_RD_ER] bit set, even when there is no error and
+               read data (MDIO_DATA[MDIO_DATA]) is correct.
+               Software may get a false read error when reading internal
+               PCS registers through MDIO. As a workaround, all internal
+               MDIO accesses should ignore the MDIO_CFG[MDIO_RD_ER] bit.
+
 For internal PHY device on internal mdio bus, a PHY node should be created.
 See the definition of the PHY node in booting-without-of.txt for an
 example of how to define a PHY (Internal PHY has no interrupt line).
diff --git a/Documentation/devicetree/bindings/soc/ti/k3-ringacc.txt b/Documentation/devicetree/bindings/soc/ti/k3-ringacc.txt
new file mode 100644 (file)
index 0000000..59758cc
--- /dev/null
@@ -0,0 +1,59 @@
+* Texas Instruments K3 NavigatorSS Ring Accelerator
+
+The Ring Accelerator (RA) is a machine which converts read/write accesses
+from/to a constant address into corresponding read/write accesses from/to a
+circular data structure in memory. The RA eliminates the need for each DMA
+controller which needs to access ring elements from having to know the current
+state of the ring (base address, current offset). The DMA controller
+performs a read or write access to a specific address range (which maps to the
+source interface on the RA) and the RA replaces the address for the transaction
+with a new address which corresponds to the head or tail element of the ring
+(head for reads, tail for writes).
+
+The Ring Accelerator is a hardware module that is responsible for accelerating
+management of the packet queues. The K3 SoCs can have more than one RA instance.
+
+Required properties:
+- compatible   : Must be "ti,am654-navss-ringacc";
+- reg          : Should contain register location and length of the following
+                 named register regions.
+- reg-names    : should be
+                 "rt" - The RA Ring Real-time Control/Status Registers
+                 "fifos" - The RA Queues Registers
+                 "proxy_gcfg" - The RA Proxy Global Config Registers
+                 "proxy_target" - The RA Proxy Datapath Registers
+- ti,num-rings : Number of rings supported by RA
+- ti,sci-rm-range-gp-rings : TI-SCI RM subtype for GP ring range
+- ti,sci       : phandle on TI-SCI compatible System controller node
+- ti,sci-dev-id        : TI-SCI device id of the ring accelerator
+- msi-parent   : phandle for "ti,sci-inta" interrupt controller
+
+Optional properties:
+- ti,dma-ring-reset-quirk : enable the software workaround for the ringacc /
+                 udma ring state interoperability issue
+
+Example:
+
+ringacc: ringacc@3c000000 {
+       compatible = "ti,am654-navss-ringacc";
+       reg =   <0x0 0x3c000000 0x0 0x400000>,
+               <0x0 0x38000000 0x0 0x400000>,
+               <0x0 0x31120000 0x0 0x100>,
+               <0x0 0x33000000 0x0 0x40000>;
+       reg-names = "rt", "fifos",
+                   "proxy_gcfg", "proxy_target";
+       ti,num-rings = <818>;
+       ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+       ti,dma-ring-reset-quirk;
+       ti,sci = <&dmsc>;
+       ti,sci-dev-id = <187>;
+       msi-parent = <&inta_main_udmass>;
+};
+
+client:
+
+dma_ipx: dma_ipx@<addr> {
+       ...
+       ti,ringacc = <&ringacc>;
+       ...
+}
index 1fd9a44..b98203c 100644 (file)
@@ -12,6 +12,7 @@ Required properties:
  - clock-names: Should be "clk_apb5".
  - pinctrl-names : a pinctrl state named "default" must be defined.
  - pinctrl-0 : phandle referencing pin configuration of the device.
+ - resets : phandle to the reset control for this device.
  - cs-gpios: Specifies the gpio pins to be used for chipselects.
             See: Documentation/devicetree/bindings/spi/spi-bus.txt
 
@@ -19,16 +20,6 @@ Optional properties:
 - clock-frequency : Input clock frequency to the PSPI block in Hz.
                    Default is 25000000 Hz.
 
-Aliases:
-- All the SPI controller nodes should be represented in the aliases node using
-  the following format 'spi{n}' withe the correct numbered in "aliases" node.
-
-Example:
-
-aliases {
-       spi0 = &spi0;
-};
-
 spi0: spi@f0200000 {
        compatible = "nuvoton,npcm750-pspi";
        reg = <0xf0200000 0x1000>;
@@ -39,5 +30,6 @@ spi0: spi@f0200000 {
        interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clk NPCM7XX_CLK_APB5>;
        clock-names = "clk_apb5";
+       resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_PSPI1>;
        cs-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
 };
diff --git a/Documentation/devicetree/bindings/spi/spi-stm32.txt b/Documentation/devicetree/bindings/spi/spi-stm32.txt
deleted file mode 100644 (file)
index d82755c..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-STMicroelectronics STM32 SPI Controller
-
-The STM32 SPI controller is used to communicate with external devices using
-the Serial Peripheral Interface. It supports full-duplex, half-duplex and
-simplex synchronous serial communication with external devices. It supports
-from 4 to 32-bit data size. Although it can be configured as master or slave,
-only master is supported by the driver.
-
-Required properties:
-- compatible: Should be one of:
-  "st,stm32h7-spi"
-  "st,stm32f4-spi"
-- reg: Offset and length of the device's register set.
-- interrupts: Must contain the interrupt id.
-- clocks: Must contain an entry for spiclk (which feeds the internal clock
-         generator).
-- #address-cells:  Number of cells required to define a chip select address.
-- #size-cells: Should be zero.
-
-Optional properties:
-- resets: Must contain the phandle to the reset controller.
-- A pinctrl state named "default" may be defined to set pins in mode of
-  operation for SPI transfer.
-- dmas: DMA specifiers for tx and rx dma. DMA fifo mode must be used. See the
-  STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt.
-- dma-names: DMA request names should include "tx" and "rx" if present.
-- cs-gpios: list of GPIO chip selects. See the SPI bus bindings,
-  Documentation/devicetree/bindings/spi/spi-bus.txt
-
-
-Child nodes represent devices on the SPI bus
-  See ../spi/spi-bus.txt
-
-Optional properties:
-- st,spi-midi-ns: Only for STM32H7, (Master Inter-Data Idleness) minimum time
-                 delay in nanoseconds inserted between two consecutive data
-                 frames.
-
-
-Example:
-       spi2: spi@40003800 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "st,stm32h7-spi";
-               reg = <0x40003800 0x400>;
-               interrupts = <36>;
-               clocks = <&rcc SPI2_CK>;
-               resets = <&rcc 1166>;
-               dmas = <&dmamux1 0 39 0x400 0x01>,
-                      <&dmamux1 1 40 0x400 0x01>;
-               dma-names = "rx", "tx";
-               pinctrl-0 = <&spi2_pins_b>;
-               pinctrl-names = "default";
-               cs-gpios = <&gpioa 11 0>;
-
-               aardvark@0 {
-                       compatible = "totalphase,aardvark";
-                       reg = <0>;
-                       spi-max-frequency = <4000000>;
-                       st,spi-midi-ns = <4000>;
-               };
-       };
index f99c733..5bb4a8f 100644 (file)
@@ -1,7 +1,7 @@
 Atmel SPI device
 
 Required properties:
-- compatible : should be "atmel,at91rm9200-spi".
+- compatible : should be "atmel,at91rm9200-spi" or "microchip,sam9x60-spi".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain spi interrupt
 - cs-gpios: chipselects (optional for SPI controller version >= 2 with the
diff --git a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
new file mode 100644 (file)
index 0000000..f0d9796
--- /dev/null
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/st,stm32-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 SPI Controller bindings
+
+description: |
+  The STM32 SPI controller is used to communicate with external devices using
+  the Serial Peripheral Interface. It supports full-duplex, half-duplex and
+  simplex synchronous serial communication with external devices. It supports
+  from 4 to 32-bit data size.
+
+maintainers:
+  - Erwan Leray <erwan.leray@st.com>
+  - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+allOf:
+  - $ref: "spi-controller.yaml#"
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: st,stm32f4-spi
+
+    then:
+      properties:
+        st,spi-midi-ns: false
+
+properties:
+  compatible:
+    enum:
+      - st,stm32f4-spi
+      - st,stm32h7-spi
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  dmas:
+    description: |
+      DMA specifiers for tx and rx dma. DMA fifo mode must be used. See
+      the STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt.
+    items:
+      - description: rx DMA channel
+      - description: tx DMA channel
+
+  dma-names:
+    items:
+      - const: rx
+      - const: tx
+
+patternProperties:
+  "^[a-zA-Z][a-zA-Z0-9,+\\-._]{0,63}@[0-9a-f]+$":
+    type: object
+    # SPI slave nodes must be children of the SPI master node and can
+    # contain the following properties.
+    properties:
+      st,spi-midi-ns:
+        description: |
+          Only for STM32H7: Master Inter-Data Idleness, the minimum time
+          delay in nanoseconds inserted between two consecutive data frames.
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - interrupts
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    #include <dt-bindings/reset/stm32mp1-resets.h>
+    spi@4000b000 {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      compatible = "st,stm32h7-spi";
+      reg = <0x4000b000 0x400>;
+      interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&rcc SPI2_K>;
+      resets = <&rcc SPI2_R>;
+      dmas = <&dmamux1 0 39 0x400 0x05>,
+             <&dmamux1 1 40 0x400 0x05>;
+      dma-names = "rx", "tx";
+      cs-gpios = <&gpioa 11 0>;
+
+      aardvark@0 {
+        compatible = "totalphase,aardvark";
+        reg = <0>;
+        spi-max-frequency = <4000000>;
+        st,spi-midi-ns = <4000>;
+      };
+    };
+
+...
index 45953f1..a9a7a3c 100644 (file)
@@ -151,6 +151,93 @@ The details of these operations are:
      Note that callbacks will always be invoked from the DMA
      engines tasklet, never from interrupt context.
 
+  Optional: per descriptor metadata
+  ---------------------------------
+  DMAengine provides two ways for metadata support.
+
+  DESC_METADATA_CLIENT
+
+    The metadata buffer is allocated/provided by the client driver and it is
+    attached to the descriptor.
+
+  .. code-block:: c
+
+     int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+                                  void *data, size_t len);
+
+  DESC_METADATA_ENGINE
+
+    The metadata buffer is allocated/managed by the DMA driver. The client
+    driver can ask for the pointer, maximum size and the currently used size of
+    the metadata and can directly update or read it.
+
+    Because the DMA driver manages the memory area containing the metadata,
+    clients must make sure that they do not try to access or get the pointer
+    after their transfer completion callback has run for the descriptor.
+    If no completion callback has been defined for the transfer, then the
+    metadata must not be accessed after issue_pending.
+    In other words: if the aim is to read back metadata after the transfer is
+    completed, then the client must use a completion callback.
+
+  .. code-block:: c
+
+     void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+               size_t *payload_len, size_t *max_len);
+
+     int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+               size_t payload_len);
+
+  Client drivers can query if a given mode is supported with:
+
+  .. code-block:: c
+
+     bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+               enum dma_desc_metadata_mode mode);
+
+  Depending on the mode used, client drivers must follow a different flow.
+
+  DESC_METADATA_CLIENT
+
+    - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+      1. prepare the descriptor (dmaengine_prep_*)
+         construct the metadata in the client's buffer
+      2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+         descriptor
+      3. submit the transfer
+    - DMA_DEV_TO_MEM:
+      1. prepare the descriptor (dmaengine_prep_*)
+      2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+         descriptor
+      3. submit the transfer
+      4. when the transfer is completed, the metadata should be available in the
+         attached buffer
+
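+    As an illustration only, a minimal sketch of the DESC_METADATA_CLIENT
+    DMA_MEM_TO_DEV flow could look like this (the channel, the mapped buffer
+    and its length are assumed to be set up already; error handling omitted):
+
+  .. code-block:: c
+
+     struct dma_async_tx_descriptor *desc;
+     u8 mdata[16];
+     int ret;
+
+     /* 1. prepare the descriptor */
+     desc = dmaengine_prep_slave_single(chan, buf_dma, len,
+                                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+
+     /* construct the metadata in the client's buffer, then ... */
+     /* 2. ... attach the buffer to the descriptor */
+     ret = dmaengine_desc_attach_metadata(desc, mdata, sizeof(mdata));
+
+     /* 3. submit the transfer */
+     dmaengine_submit(desc);
+     dma_async_issue_pending(chan);
+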
+  DESC_METADATA_ENGINE
+
+    - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+      1. prepare the descriptor (dmaengine_prep_*)
+      2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the
+         engine's metadata area
+      3. update the metadata at the pointer
+      4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the
+         amount of data the client has placed into the metadata buffer
+      5. submit the transfer
+    - DMA_DEV_TO_MEM:
+      1. prepare the descriptor (dmaengine_prep_*)
+      2. submit the transfer
+      3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get
+         the pointer to the engine's metadata area
+      4. read out the metadata from the pointer
+
+  .. note::
+
+     When DESC_METADATA_ENGINE mode is used the metadata area for the descriptor
+     is no longer valid after the transfer has been completed (valid up to the
+     point when the completion callback returns if used).
+
+     Mixed use of DESC_METADATA_CLIENT / DESC_METADATA_ENGINE is not allowed;
+     client drivers must use only one of the modes per descriptor.
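+
+  As a further illustration (a sketch only, not authoritative), the
+  DESC_METADATA_ENGINE DMA_MEM_TO_DEV flow could look like this, assuming
+  the descriptor has already been prepared and a client buffer my_mdata of
+  my_len bytes holds the metadata to be sent:
+
+  .. code-block:: c
+
+     size_t payload_len, max_len;
+     void *mdata;
+
+     /* 2. get the pointer to the engine's metadata area */
+     mdata = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
+
+     /* 3. update the metadata at the pointer (my_len <= max_len) */
+     memcpy(mdata, my_mdata, my_len);
+
+     /* 4. tell the DMA engine how much data was placed in the buffer */
+     dmaengine_desc_set_metadata_len(desc, my_len);
+
+     /* 5. submit the transfer */
+     dmaengine_submit(desc);
+     dma_async_issue_pending(chan);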
+
 4. Submit the transaction
 
    Once the descriptor has been prepared and the callback information
index dfc4486..790a150 100644 (file)
@@ -247,6 +247,54 @@ after each transfer. In case of a ring buffer, they may loop
 (DMA_CYCLIC). Addresses pointing to a device's register (e.g. a FIFO)
 are typically fixed.
 
+Per descriptor metadata support
+-------------------------------
+Some data movement architectures (DMA controller and peripherals) use metadata
+associated with a transaction. The DMA controller's role is to transfer the
+payload and the metadata alongside each other.
+The metadata is not used by the DMA engine itself, but it contains
+parameters, keys, vectors, etc. for the peripheral, or from the peripheral.
+
+The DMAengine framework provides a generic way to facilitate metadata for
+descriptors. Depending on the architecture, the DMA driver can implement
+either or both of the methods, and it is up to the client driver to choose
+which one to use.
+
+- DESC_METADATA_CLIENT
+
+  The metadata buffer is allocated/provided by the client driver and it is
+  attached (via the dmaengine_desc_attach_metadata() helper) to the descriptor.
+
+  From the DMA driver the following is expected for this mode:
+  - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM
+    The data from the provided metadata buffer should be prepared for the DMA
+    controller to be sent alongside the payload data, either by copying it to
+    a hardware descriptor or by placing it in a tightly coupled packet.
+  - DMA_DEV_TO_MEM
+    On transfer completion the DMA driver must copy the metadata to the client
+    provided metadata buffer before notifying the client about the completion.
+    After the transfer completion, DMA drivers must not touch the metadata
+    buffer provided by the client.
+
+- DESC_METADATA_ENGINE
+
+  The metadata buffer is allocated/managed by the DMA driver. The client driver
+  can ask for the pointer, maximum size and the currently used size of the
+  metadata and can directly update or read it. dmaengine_desc_get_metadata_ptr()
+  and dmaengine_desc_set_metadata_len() are provided as helper functions.
+
+  From the DMA driver the following is expected for this mode:
+  - get_metadata_ptr
+    Should return a pointer for the metadata buffer, the maximum size of the
+    metadata buffer and the currently used / valid (if any) bytes in the buffer.
+  - set_metadata_len
+    It is called by the client after it has placed the metadata in the buffer
+    to let the DMA driver know the number of valid bytes provided.
+
+  Note: since the client will ask for the metadata pointer in the completion
+  callback (in the DMA_DEV_TO_MEM case) the DMA driver must ensure that the
+  descriptor is not freed before the callback is called.
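+
+As a rough sketch only: a DMA driver supporting DESC_METADATA_ENGINE points
+its descriptors at a set of metadata ops. The xxx_* names, the to_xxx_desc()
+helper and XXX_METADATA_SIZE below are hypothetical; only struct
+dma_descriptor_metadata_ops and the metadata_ops pointer in struct
+dma_async_tx_descriptor come from the framework:
+
+.. code-block:: c
+
+   static void *xxx_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
+                                     size_t *payload_len, size_t *max_len)
+   {
+           struct xxx_desc *d = to_xxx_desc(tx);   /* hypothetical helper */
+
+           *payload_len = d->metadata_len;         /* currently valid bytes */
+           *max_len = XXX_METADATA_SIZE;           /* buffer capacity */
+           return d->metadata;
+   }
+
+   static int xxx_set_metadata_len(struct dma_async_tx_descriptor *tx,
+                                   size_t payload_len)
+   {
+           struct xxx_desc *d = to_xxx_desc(tx);
+
+           if (payload_len > XXX_METADATA_SIZE)
+                   return -EINVAL;
+
+           d->metadata_len = payload_len;
+           return 0;
+   }
+
+   static struct dma_descriptor_metadata_ops xxx_metadata_ops = {
+           .get_ptr = xxx_get_metadata_ptr,
+           .set_len = xxx_set_metadata_len,
+   };
+
+   /* in the prep callback, where txd is the descriptor being returned: */
+   txd->metadata_ops = &xxx_metadata_ops;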
+
 Device operations
 -----------------
 
diff --git a/Documentation/hwmon/adm1177.rst b/Documentation/hwmon/adm1177.rst
new file mode 100644 (file)
index 0000000..c81e0b4
--- /dev/null
@@ -0,0 +1,36 @@
+Kernel driver adm1177
+=====================
+
+Supported chips:
+  * Analog Devices ADM1177
+    Prefix: 'adm1177'
+    Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADM1177.pdf
+
+Author: Beniamin Bia <beniamin.bia@analog.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Analog Devices ADM1177
+Hot-Swap Controller and Digital Power Monitors with Soft Start Pin.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
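+
+For example (a sketch only; the I2C bus number and the 0x5a address are
+board-specific assumptions), the device can be declared from board code:
+
+.. code-block:: c
+
+   #include <linux/i2c.h>
+
+   static struct i2c_board_info adm1177_info = {
+           I2C_BOARD_INFO("adm1177", 0x5a),
+   };
+
+   /* called from early board init, before I2C bus 0 is instantiated */
+   i2c_register_board_info(0, &adm1177_info, 1);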
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. The current max attribute
+is read-write; all other attributes are read-only.
+
+in0_input              Measured voltage in microvolts.
+
+curr1_input            Measured current in microamperes.
+curr1_max_alarm                Overcurrent alarm in microamperes.
diff --git a/Documentation/hwmon/drivetemp.rst b/Documentation/hwmon/drivetemp.rst
new file mode 100644 (file)
index 0000000..2d37d04
--- /dev/null
@@ -0,0 +1,52 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver drivetemp
+=======================
+
+
+References
+----------
+
+ANS T13/1699-D
+Information technology - AT Attachment 8 - ATA/ATAPI Command Set (ATA8-ACS)
+
+ANS Project T10/BSR INCITS 513
+Information technology - SCSI Primary Commands - 4 (SPC-4)
+
+ANS Project INCITS 557
+Information technology - SCSI / ATA Translation - 5 (SAT-5)
+
+
+Description
+-----------
+
+This driver supports reporting the temperature of disk and solid state
+drives with temperature sensors.
+
+If supported, it uses the ATA SCT Command Transport feature to read
+the current drive temperature and, if available, temperature limits
+as well as historic minimum and maximum temperatures. If SCT Command
+Transport is not supported, the driver uses SMART attributes to read
+the drive temperature.
+
+
+Sysfs entries
+-------------
+
+Only the temp1_input attribute is always available. Other attributes are
+available only if reported by the drive. All temperatures are reported in
+milli-degrees Celsius.
+
+=======================        =====================================================
+temp1_input            Current drive temperature
+temp1_lcrit            Minimum temperature limit. Operating the device below
+                       this temperature may cause physical damage to the
+                       device.
+temp1_min              Minimum recommended continuous operating limit
+temp1_max              Maximum recommended continuous operating temperature
+temp1_crit             Maximum temperature limit. Operating the device above
+                       this temperature may cause physical damage to the
+                       device.
+temp1_lowest           Minimum temperature seen this power cycle
+temp1_highest          Maximum temperature seen this power cycle
+=======================        =====================================================
index 43cc605..b24adb6 100644 (file)
@@ -29,6 +29,7 @@ Hardware Monitoring Kernel Drivers
    adm1025
    adm1026
    adm1031
+   adm1177
    adm1275
    adm9240
    ads7828
@@ -47,6 +48,7 @@ Hardware Monitoring Kernel Drivers
    da9055
    dell-smm-hwmon
    dme1737
+   drivetemp
    ds1621
    ds620
    emc1403
@@ -106,8 +108,10 @@ Hardware Monitoring Kernel Drivers
    max1619
    max1668
    max197
+   max20730
    max20751
    max31722
+   max31730
    max31785
    max31790
    max34440
@@ -177,6 +181,7 @@ Hardware Monitoring Kernel Drivers
    wm831x
    wm8350
    xgene-hwmon
+   xdpe12284
    zl6100
 
 .. only::  subproject and html
diff --git a/Documentation/hwmon/max20730.rst b/Documentation/hwmon/max20730.rst
new file mode 100644 (file)
index 0000000..cea7ae5
--- /dev/null
@@ -0,0 +1,74 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Kernel driver max20730
+======================
+
+Supported chips:
+
+  * Maxim MAX20730
+
+    Prefix: 'max20730'
+
+    Addresses scanned: -
+
+    Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX20730.pdf
+
+  * Maxim MAX20734
+
+    Prefix: 'max20734'
+
+    Addresses scanned: -
+
+    Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX20734.pdf
+
+  * Maxim MAX20743
+
+    Prefix: 'max20743'
+
+    Addresses scanned: -
+
+    Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX20743.pdf
+
+Author: Guenter Roeck <linux@roeck-us.net>
+
+
+Description
+-----------
+
+This driver implements support for Maxim MAX20730, MAX20734, and MAX20743
+Integrated, Step-Down Switching Regulators with PMBus support.
+
+The driver is a client driver to the core PMBus driver.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
+details.
+
+
+Sysfs entries
+-------------
+
+=================== ===== =======================================================
+curr1_crit          RW/RO Critical output current. Please see datasheet for
+                          supported limits. Read-only if the chip is
+                          write protected; read-write otherwise.
+curr1_crit_alarm    RO    Output current critical alarm
+curr1_input         RO    Output current
+curr1_label         RO    'iout1'
+in1_alarm           RO    Input voltage alarm
+in1_input           RO    Input voltage
+in1_label           RO    'vin'
+in2_alarm           RO    Output voltage alarm
+in2_input           RO    Output voltage
+in2_label           RO    'vout1'
+temp1_crit          RW/RO Critical temperature. Supported values are 130 or 150
+                          degrees C. Read-only if the chip is write protected;
+                          read-write otherwise.
+temp1_crit_alarm    RO    Temperature critical alarm
+temp1_input         RO    Chip temperature
+=================== ===== =======================================================
diff --git a/Documentation/hwmon/max31730.rst b/Documentation/hwmon/max31730.rst
new file mode 100644 (file)
index 0000000..def0de1
--- /dev/null
@@ -0,0 +1,44 @@
+Kernel driver max31730
+======================
+
+Supported chips:
+
+  * Maxim MAX31730
+
+    Prefix: 'max31730'
+
+    Addresses scanned: 0x1c, 0x1d, 0x1e, 0x1f, 0x4c, 0x4d, 0x4e, 0x4f
+
+    Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31730.pdf
+
+Author: Guenter Roeck <linux@roeck-us.net>
+
+
+Description
+-----------
+
+This driver implements support for Maxim MAX31730.
+
+The MAX31730 temperature sensor monitors its own temperature and the
+temperatures of three external diode-connected transistors. The operating
+supply voltage is from 3.0V to 3.6V. Resistance cancellation compensates
+for high series resistance in circuit-board traces and the external thermal
+diode, while beta compensation corrects for temperature-measurement
+errors due to low-beta sensing transistors.
+
+
+Sysfs entries
+-------------
+
+=================== == =======================================================
+temp[1-4]_enable    RW Temperature enable/disable
+                       Set to 1 to enable channel, 0 to disable
+temp[1-4]_input     RO Temperature input
+temp[2-4]_fault     RO Fault indicator for remote channels
+temp[1-4]_max       RW Maximum temperature
+temp[1-4]_max_alarm RW Maximum temperature alarm
+temp[1-4]_min       RW Minimum temperature. Common for all channels.
+                       Only temp1_min is writeable.
+temp[1-4]_min_alarm RO Minimum temperature alarm
+temp[2-4]_offset    RW Temperature offset for remote channels
+=================== == =======================================================
index abfb9dd..f787984 100644 (file)
@@ -63,6 +63,16 @@ Supported chips:
 
        http://www.ti.com/lit/gpn/tps544c25
 
+  * Maxim MAX20796
+
+    Prefix: 'max20796'
+
+    Addresses scanned: -
+
+    Datasheet:
+
+       Not published
+
   * Generic PMBus devices
 
     Prefix: 'pmbus'
index 746f21f..704f0cb 100644 (file)
@@ -3,9 +3,10 @@ Kernel driver ucd9000
 
 Supported chips:
 
-  * TI UCD90120, UCD90124, UCD90160, UCD9090, and UCD90910
+  * TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, and UCD90910
 
-    Prefixes: 'ucd90120', 'ucd90124', 'ucd90160', 'ucd9090', 'ucd90910'
+    Prefixes: 'ucd90120', 'ucd90124', 'ucd90160', 'ucd90320', 'ucd9090',
+              'ucd90910'
 
     Addresses scanned: -
 
@@ -14,6 +15,7 @@ Supported chips:
        - http://focus.ti.com/lit/ds/symlink/ucd90120.pdf
        - http://focus.ti.com/lit/ds/symlink/ucd90124.pdf
        - http://focus.ti.com/lit/ds/symlink/ucd90160.pdf
+       - http://focus.ti.com/lit/ds/symlink/ucd90320.pdf
        - http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
        - http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
 
@@ -45,6 +47,12 @@ power-on reset signals, external interrupts, cascading, or other system
 functions. Twelve of these pins offer PWM functionality. Using these pins, the
 UCD90160 offers support for margining, and general-purpose PWM functions.
 
+The UCD90320 is a 32-rail PMBus/I2C addressable power-supply sequencer and
+monitor. The 24 integrated ADC channels (AMONx) monitor the power supply
+voltage, current, and temperature. Of the 84 GPIO pins, 8 can be used as
+digital monitors (DMONx), 32 to enable the power supplies (ENx), 24 for
+margining (MARx), 16 as logic GPOs, and 32 as GPIs for cascading and system
+functions.
+
 The UCD9090 is a 10-rail PMBus/I2C addressable power-supply sequencer and
 monitor. The device integrates a 12-bit ADC for monitoring up to 10 power-supply
 voltage inputs. Twenty-three GPIO pins can be used for power supply enables,
diff --git a/Documentation/hwmon/xdpe12284.rst b/Documentation/hwmon/xdpe12284.rst
new file mode 100644 (file)
index 0000000..6b7ae98
--- /dev/null
@@ -0,0 +1,101 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver xdpe122
+=====================
+
+Supported chips:
+
+  * Infineon XDPE12254
+
+    Prefix: 'xdpe12254'
+
+  * Infineon XDPE12284
+
+    Prefix: 'xdpe12284'
+
+Authors:
+
+       Vadim Pasternak <vadimp@mellanox.com>
+
+Description
+-----------
+
+This driver implements support for the Infineon Multi-phase XDPE122 family
+of dual-loop voltage regulators. The family includes the XDPE12284 and
+XDPE12254 devices.
+The devices in this family are compliant with:
+
+- Intel VR13 and VR13HC rev 1.3, IMVP8 rev 1.2 and IMVP9 rev 1.3 DC-DC
+  converter specifications.
+- Intel SVID rev 1.9 protocol.
+- PMBus rev 1.3 interface.
+
+The devices support the linear format for reading input voltage, input and
+output current, input and output power, and temperature.
+They support the VID format for reading output voltage. The following modes
+are supported (a conversion sketch follows the list):
+
+- VR12.0 mode, 5-mV DAC - 0x01.
+- VR12.5 mode, 10-mV DAC - 0x02.
+- IMVP9 mode, 5-mV DAC - 0x03.
+- AMD mode 6.25mV - 0x10.
+
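The following is an illustrative sketch only, not driver code: it decodes a
VID reading to microvolts for the modes above. The base voltages (250 mV for
VR12.0, 500 mV for VR12.5, 200 mV for IMVP9) are assumptions drawn from
common Intel VR conventions, not from the XDPE122 datasheet::

    static long example_vid_to_uv(unsigned int vout_mode, unsigned int vid)
    {
            if (!vid)
                    return 0;       /* VID code 0 conventionally means "off" */

            switch (vout_mode) {
            case 0x01:      /* VR12.0, 5 mV DAC; 250 mV base is an assumption */
                    return 250000 + (vid - 1) * 5000;
            case 0x02:      /* VR12.5, 10 mV DAC; 500 mV base is an assumption */
                    return 500000 + (vid - 1) * 10000;
            case 0x03:      /* IMVP9, 5 mV DAC; 200 mV base is an assumption */
                    return 200000 + (vid - 1) * 5000;
            case 0x10:      /* AMD mode, 6.25 mV per step from 0 V */
                    return vid * 6250;
            }
            return -1;              /* unknown VOUT_MODE */
    }
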
+Devices support two pages for telemetry.
+
+For current, the driver provides input readings, maximum and critical
+thresholds, and maximum and critical alarms. Critical thresholds and the
+critical alarm are supported only for output current.
+The driver exports the following attributes via sysfs, where indexes
+1, 2 are for "iin" and 3, 4 for "iout":
+
+**curr[3-4]_crit**
+
+**curr[3-4]_crit_alarm**
+
+**curr[1-4]_input**
+
+**curr[1-4]_label**
+
+**curr[1-4]_max**
+
+**curr[1-4]_max_alarm**
+
+For voltage, the driver provides input readings, critical and low-critical
+thresholds, and critical and low-critical alarms.
+The driver exports the following attributes via sysfs, where indexes
+1, 2 are for "vin" and 3, 4 for "vout":
+
+**in[1-4]_crit**
+
+**in[1-4]_crit_alarm**
+
+**in[1-4]_input**
+
+**in[1-4]_label**
+
+**in[1-4]_lcrit**
+
+**in[1-4]_lcrit_alarm**
+
+For power, the driver provides input readings and alarms; the power alarm is
+supported only for input power.
+The driver exports the following attributes via sysfs, where indexes
+1, 2 are for "pin" and 3, 4 for "pout":
+
+**power[1-2]_alarm**
+
+**power[1-4]_input**
+
+**power[1-4]_label**
+
+For temperature, the driver provides input readings, maximum and critical
+thresholds, and maximum and critical alarms.
+The driver exports the following attributes via sysfs:
+
+**temp[1-2]_crit**
+
+**temp[1-2]_crit_alarm**
+
+**temp[1-2]_input**
+
+**temp[1-2]_max**
+
+**temp[1-2]_max_alarm**
index 66b7281..5924365 100644 (file)
@@ -977,6 +977,15 @@ W: http://ez.analog.com/community/linux-device-drivers
 F:     drivers/iio/imu/adis16460.c
 F:     Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml
 
+ANALOG DEVICES INC ADM1177 DRIVER
+M:     Beniamin Bia <beniamin.bia@analog.com>
+M:     Michael Hennerich <Michael.Hennerich@analog.com>
+L:     linux-hwmon@vger.kernel.org
+W:     http://ez.analog.com/community/linux-device-drivers
+S:     Supported
+F:     drivers/hwmon/adm1177.c
+F:     Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
+
 ANALOG DEVICES INC ADP5061 DRIVER
 M:     Stefan Popa <stefan.popa@analog.com>
 L:     linux-pm@vger.kernel.org
@@ -2242,6 +2251,7 @@ L:        linux-rockchip@lists.infradead.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git
 S:     Maintained
 F:     Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+F:     Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
 F:     arch/arm/boot/dts/rk3*
 F:     arch/arm/boot/dts/rv1108*
 F:     arch/arm/mach-rockchip/
@@ -6197,6 +6207,7 @@ ETHERNET PHY LIBRARY
 M:     Andrew Lunn <andrew@lunn.ch>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Heiner Kallweit <hkallweit1@gmail.com>
+R:     Russell King <linux@armlinux.org.uk>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-class-net-phydev
@@ -7498,6 +7509,12 @@ S:       Supported
 F:     drivers/scsi/hisi_sas/
 F:     Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
 
+HISILICON V3XX SPI NOR FLASH Controller Driver
+M:     John Garry <john.garry@huawei.com>
+W:     http://www.hisilicon.com
+S:     Maintained
+F:     drivers/spi/spi-hisi-sfc-v3xx.c
+
 HISILICON QM AND ZIP Controller DRIVER
 M:     Zhou Wang <wangzhou1@hisilicon.com>
 L:     linux-crypto@vger.kernel.org
@@ -7841,10 +7858,10 @@ F:      Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
 F:     drivers/i3c/master/dw*
 
 I3C DRIVER FOR CADENCE I3C MASTER IP
-M:      Przemysław Gaj <pgaj@cadence.com>
-S:      Maintained
-F:      Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
-F:      drivers/i3c/master/i3c-master-cdns.c
+M:     Przemysław Gaj <pgaj@cadence.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
+F:     drivers/i3c/master/i3c-master-cdns.c
 
 IA64 (Itanium) PLATFORM
 M:     Tony Luck <tony.luck@intel.com>
@@ -8381,6 +8398,14 @@ Q:       https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:     Supported
 F:     drivers/dma/ioat*
 
+INTEL IDXD DRIVER
+M:     Dave Jiang <dave.jiang@intel.com>
+L:     dmaengine@vger.kernel.org
+S:     Supported
+F:     drivers/dma/idxd/*
+F:     include/uapi/linux/idxd.h
+F:     include/linux/idxd.h
+
 INTEL IDLE DRIVER
 M:     Jacob Pan <jacob.jun.pan@linux.intel.com>
 M:     Len Brown <lenb@kernel.org>
@@ -8562,6 +8587,12 @@ S:       Maintained
 F:     arch/x86/include/asm/intel_telemetry.h
 F:     drivers/platform/x86/intel_telemetry*
 
+INTEL UNCORE FREQUENCY CONTROL
+M:     Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     drivers/platform/x86/intel-uncore-frequency.c
+
 INTEL VIRTUAL BUTTON DRIVER
 M:     AceLan Kao <acelan.kao@canonical.com>
 L:     platform-driver-x86@vger.kernel.org
@@ -8569,7 +8600,7 @@ S:        Maintained
 F:     drivers/platform/x86/intel-vbtn.c
 
 INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
-M:     Stanislaw Gruszka <sgruszka@redhat.com>
+M:     Stanislaw Gruszka <stf_xl@wp.pl>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 F:     drivers/net/wireless/intel/iwlegacy/
@@ -11506,6 +11537,7 @@ F:      drivers/net/dsa/
 
 NETWORKING [GENERAL]
 M:     "David S. Miller" <davem@davemloft.net>
+M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
@@ -13151,6 +13183,11 @@ S:     Maintained
 F:     drivers/iio/chemical/pms7003.c
 F:     Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
 
+PLX DMA DRIVER
+M:     Logan Gunthorpe <logang@deltatee.com>
+S:     Maintained
+F:     drivers/dma/plx_dma.c
+
 PMBUS HARDWARE MONITORING DRIVERS
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-hwmon@vger.kernel.org
@@ -13827,7 +13864,7 @@ S:      Maintained
 F:     arch/mips/ralink
 
 RALINK RT2X00 WIRELESS LAN DRIVER
-M:     Stanislaw Gruszka <sgruszka@redhat.com>
+M:     Stanislaw Gruszka <stf_xl@wp.pl>
 M:     Helmut Schaa <helmut.schaa@googlemail.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
@@ -16608,7 +16645,7 @@ F:      kernel/time/ntp.c
 F:     tools/testing/selftests/timers/
 
 TIPC NETWORK LAYER
-M:     Jon Maloy <jon.maloy@ericsson.com>
+M:     Jon Maloy <jmaloy@redhat.com>
 M:     Ying Xue <ying.xue@windriver.com>
 L:     netdev@vger.kernel.org (core kernel code)
 L:     tipc-discussion@lists.sourceforge.net (user apps, general discussion)
index c50ef91..6a01b07 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index 7ad0798..91f93bc 100644 (file)
 };
 
 / {
+       memory@80000000 {
+               device_type = "memory";
+               reg = <0x80000000 0x20000000>; /* 512 MB */
+       };
+
        clk_mcasp0_fixed: clk_mcasp0_fixed {
                #clock-cells = <0>;
                compatible = "fixed-clock";
index 078cb47..a6fbc08 100644 (file)
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&spi0_pins_default>;
        pinctrl-1 = <&spi0_pins_sleep>;
+       ti,pindir-d0-out-d1-in = <1>;
 };
 
 &spi1 {
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&spi1_pins_default>;
        pinctrl-1 = <&spi1_pins_sleep>;
+       ti,pindir-d0-out-d1-in = <1>;
 };
 
 &usb2_phy1 {
index ae50203..6607fa8 100644 (file)
@@ -146,10 +146,9 @@ ARM_BE8(orr        r7, r7, #(1 << 25))     @ HSCTLR.EE
 #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
        @ make CNTP_* and CNTPCT accessible from PL1
        mrc     p15, 0, r7, c0, c1, 1   @ ID_PFR1
-       lsr     r7, #16
-       and     r7, #0xf
-       cmp     r7, #1
-       bne     1f
+       ubfx    r7, r7, #16, #4
+       teq     r7, #0
+       beq     1f
        mrc     p15, 4, r7, c14, c1, 0  @ CNTHCTL
        orr     r7, r7, #3              @ PL1PCEN | PL1PCTEN
        mcr     p15, 4, r7, c14, c1, 0  @ CNTHCTL
index e688dfa..5616cab 100644 (file)
@@ -162,6 +162,7 @@ config ARM64
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_FUNCTION_ARG_ACCESS_API
+       select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_RCU_TABLE_FREE
        select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
@@ -302,6 +303,9 @@ config ARCH_SUPPORTS_UPROBES
 config ARCH_PROC_KCORE_TEXT
        def_bool y
 
+config BROKEN_GAS_INST
+       def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)
+
 config KASAN_SHADOW_OFFSET
        hex
        depends on KASAN
@@ -515,9 +519,13 @@ config ARM64_ERRATUM_1418040
 
          If unsure, say Y.
 
+config ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+       bool
+
 config ARM64_ERRATUM_1165522
        bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
        default y
+       select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
        help
          This option adds a workaround for ARM Cortex-A76 erratum 1165522.
 
@@ -527,6 +535,19 @@ config ARM64_ERRATUM_1165522
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_1530923
+       bool "Cortex-A55: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+       default y
+       select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+       help
+         This option adds a workaround for ARM Cortex-A55 erratum 1530923.
+
+         Affected Cortex-A55 cores (r0p0, r0p1, r1p0, r2p0) could end up with
+         corrupted TLBs by speculating an AT instruction during a guest
+         context switch.
+
+         If unsure, say Y.
+
 config ARM64_ERRATUM_1286807
        bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
        default y
@@ -543,9 +564,13 @@ config ARM64_ERRATUM_1286807
          invalidated has been observed by other observers. The
          workaround repeats the TLBI+DSB operation.
 
+config ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
+       bool
+
 config ARM64_ERRATUM_1319367
        bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
        default y
+       select ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
        help
          This option adds workarounds for ARM Cortex-A57 erratum 1319537
          and Cortex-A72 erratum 1319367.
@@ -1364,6 +1389,11 @@ config ARM64_PAN
         instruction if the cpu does not implement the feature.
 
 config ARM64_LSE_ATOMICS
+       bool
+       default ARM64_USE_LSE_ATOMICS
+       depends on $(as-instr,.arch_extension lse)
+
+config ARM64_USE_LSE_ATOMICS
        bool "Atomic instructions"
        depends on JUMP_LABEL
        default y
@@ -1485,6 +1515,30 @@ config ARM64_PTR_AUTH
 
 endmenu
 
+menu "ARMv8.5 architectural features"
+
+config ARM64_E0PD
+       bool "Enable support for E0PD"
+       default y
+       help
+         E0PD (part of the ARMv8.5 extensions) allows us to ensure
+         that EL0 accesses made via TTBR1 always fault in constant time,
+         providing similar benefits to KASLR as those provided by KPTI, but
+         with lower overhead and without disrupting legitimate access to
+         kernel memory such as SPE.
+
+         This option enables E0PD for TTBR1 where available.
+
+config ARCH_RANDOM
+       bool "Enable support for random number generation"
+       default y
+       help
+         Random number generation (part of the ARMv8.5 Extensions)
+         provides a high bandwidth, cryptographically secure
+         hardware random number generator.
+
+endmenu
+
 config ARM64_SVE
        bool "ARM Scalable Vector Extension support"
        default y
@@ -1545,7 +1599,7 @@ config ARM64_MODULE_PLTS
 
 config ARM64_PSEUDO_NMI
        bool "Support for NMI-like interrupts"
-       select CONFIG_ARM_GIC_V3
+       select ARM_GIC_V3
        help
          Adds support for mimicking Non-Maskable Interrupts through the use of
          GIC interrupt priority. This support requires version 3 or later of
index 1fbe24d..dca1a97 100644 (file)
@@ -30,11 +30,8 @@ LDFLAGS_vmlinux      += --fix-cortex-a53-843419
   endif
 endif
 
-# Check for binutils support for specific extensions
-lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
-
-ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
-  ifeq ($(lseinstr),)
+ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
+  ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
 $(warning LSE atomics not supported by binutils)
   endif
 endif
@@ -45,19 +42,15 @@ cc_has_k_constraint := $(call try-run,echo                          \
                return 0;                                               \
        }' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)
 
-ifeq ($(CONFIG_ARM64), y)
-brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
-
-  ifneq ($(brokengasinst),)
+ifeq ($(CONFIG_BROKEN_GAS_INST),y)
 $(warning Detected assembler with broken .inst; disassembly will be unreliable)
-  endif
 endif
 
-KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr) $(brokengasinst)     \
+KBUILD_CFLAGS  += -mgeneral-regs-only  \
                   $(compat_vdso) $(cc_has_k_constraint)
 KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS  += $(call cc-disable-warning, psabi)
-KBUILD_AFLAGS  += $(lseinstr) $(brokengasinst) $(compat_vdso)
+KBUILD_AFLAGS  += $(compat_vdso)
 
 KBUILD_CFLAGS  += $(call cc-option,-mabi=lp64)
 KBUILD_AFLAGS  += $(call cc-option,-mabi=lp64)
index 1f012c5..cd34148 100644 (file)
@@ -16,7 +16,7 @@
 
 OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
 
-targets := Image Image.gz
+targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
 
 $(obj)/Image: vmlinux FORCE
        $(call if_changed,objcopy)
index b9f8d78..324e7d5 100644 (file)
@@ -35,13 +35,16 @@ void apply_alternatives_module(void *start, size_t length);
 static inline void apply_alternatives_module(void *start, size_t length) { }
 #endif
 
-#define ALTINSTR_ENTRY(feature,cb)                                           \
+#define ALTINSTR_ENTRY(feature)                                                      \
        " .word 661b - .\n"                             /* label           */ \
-       " .if " __stringify(cb) " == 0\n"                                     \
        " .word 663f - .\n"                             /* new instruction */ \
-       " .else\n"                                                            \
+       " .hword " __stringify(feature) "\n"            /* feature bit     */ \
+       " .byte 662b-661b\n"                            /* source len      */ \
+       " .byte 664f-663f\n"                            /* replacement len */
+
+#define ALTINSTR_ENTRY_CB(feature, cb)                                       \
+       " .word 661b - .\n"                             /* label           */ \
        " .word " __stringify(cb) "- .\n"               /* callback */        \
-       " .endif\n"                                                           \
        " .hword " __stringify(feature) "\n"            /* feature bit     */ \
        " .byte 662b-661b\n"                            /* source len      */ \
        " .byte 664f-663f\n"                            /* replacement len */
@@ -62,15 +65,14 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
  *
  * Alternatives with callbacks do not generate replacement instructions.
  */
-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb)        \
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)    \
        ".if "__stringify(cfg_enabled)" == 1\n"                         \
        "661:\n\t"                                                      \
        oldinstr "\n"                                                   \
        "662:\n"                                                        \
        ".pushsection .altinstructions,\"a\"\n"                         \
-       ALTINSTR_ENTRY(feature,cb)                                      \
+       ALTINSTR_ENTRY(feature)                                         \
        ".popsection\n"                                                 \
-       " .if " __stringify(cb) " == 0\n"                               \
        ".pushsection .altinstr_replacement, \"a\"\n"                   \
        "663:\n\t"                                                      \
        newinstr "\n"                                                   \
@@ -78,17 +80,25 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
        ".popsection\n\t"                                               \
        ".org   . - (664b-663b) + (662b-661b)\n\t"                      \
        ".org   . - (662b-661b) + (664b-663b)\n"                        \
-       ".else\n\t"                                                     \
+       ".endif\n"
+
+#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb)       \
+       ".if "__stringify(cfg_enabled)" == 1\n"                         \
+       "661:\n\t"                                                      \
+       oldinstr "\n"                                                   \
+       "662:\n"                                                        \
+       ".pushsection .altinstructions,\"a\"\n"                         \
+       ALTINSTR_ENTRY_CB(feature, cb)                                  \
+       ".popsection\n"                                                 \
        "663:\n\t"                                                      \
        "664:\n\t"                                                      \
-       ".endif\n"                                                      \
        ".endif\n"
 
 #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)        \
-       __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
+       __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
 
 #define ALTERNATIVE_CB(oldinstr, cb) \
-       __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
+       __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
 #else
 
 #include <asm/assembler.h>
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
new file mode 100644 (file)
index 0000000..3fe02da
--- /dev/null
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <linux/random.h>
+#include <asm/cpufeature.h>
+
+static inline bool __arm64_rndr(unsigned long *v)
+{
+       bool ok;
+
+       /*
+        * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
+        * and to 0b0100 otherwise.
+        */
+       asm volatile(
+               __mrs_s("%0", SYS_RNDR_EL0) "\n"
+       "       cset %w1, ne\n"
+       : "=r" (*v), "=r" (ok)
+       :
+       : "cc");
+
+       return ok;
+}
+
+static inline bool __must_check arch_get_random_long(unsigned long *v)
+{
+       return false;
+}
+
+static inline bool __must_check arch_get_random_int(unsigned int *v)
+{
+       return false;
+}
+
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+{
+       /*
+        * Only support the generic interface after we have detected
+        * the system wide capability, avoiding complexity with the
+        * cpufeature code and with potential scheduling between CPUs
+        * with and without the feature.
+        */
+       if (!cpus_have_const_cap(ARM64_HAS_RNG))
+               return false;
+
+       return __arm64_rndr(v);
+}
+
+
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+{
+       unsigned long val;
+       bool ok = arch_get_random_seed_long(&val);
+
+       *v = val;
+       return ok;
+}
+
+static inline bool __init __early_cpu_has_rndr(void)
+{
+       /* Open code as we run prior to the first call to cpufeature. */
+       unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
+       return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
+}
+
+#else
+
+static inline bool __arm64_rndr(unsigned long *v) { return false; }
+static inline bool __init __early_cpu_has_rndr(void) { return false; }
+
+#endif /* CONFIG_ARCH_RANDOM */
+#endif /* _ASM_ARCHRANDOM_H */
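A hypothetical in-kernel caller (not part of the patch) showing the intended
use of the seed interface: try the RNDR-backed source first, and fall back to
the software pool when the ARM64_HAS_RNG capability is absent:

    #include <linux/random.h>
    #include <asm/archrandom.h>

    static unsigned long example_seed_word(void)
    {
            unsigned long seed;

            if (arch_get_random_seed_long(&seed))
                    return seed;                    /* hardware entropy via RNDR */

            get_random_bytes(&seed, sizeof(seed));  /* software fallback */
            return seed;
    }
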
index b8cf7c8..524b3ea 100644 (file)
        msr     daif, \flags
        .endm
 
-       /* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */
-       .macro  inherit_daif, pstate:req, tmp:req
-       and     \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
-       msr     daif, \tmp
-       .endm
-
        /* IRQ is the lowest priority flag, unconditionally unmask the rest. */
        .macro enable_da_f
        msr     daifclr, #(8 | 4 | 1)
 9990:
        .endm
 
-/*
- * SMP data memory barrier
- */
-       .macro  smp_dmb, opt
-       dmb     \opt
-       .endm
-
 /*
  * RAS Error Synchronization barrier
  */
@@ -461,17 +448,6 @@ USER(\label, ic    ivau, \tmp2)                    // invalidate I line PoU
        b.ne    9998b
        .endm
 
-/*
- * Annotate a function as position independent, i.e., safe to be called before
- * the kernel virtual mapping is activated.
- */
-#define ENDPIPROC(x)                   \
-       .globl  __pi_##x;               \
-       .type   __pi_##x, %function;    \
-       .set    __pi_##x, x;            \
-       .size   __pi_##x, . - x;        \
-       ENDPROC(x)
-
 /*
  * Annotate a function as being unsuitable for kprobes.
  */
index 7b01214..13869b7 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/stringify.h>
 
-#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
 #define __LL_SC_FALLBACK(asm_ops)                                      \
 "      b       3f\n"                                                   \
 "      .subsection     1\n"                                            \
index 574808b..da3280f 100644 (file)
@@ -14,6 +14,7 @@
 static inline void __lse_atomic_##op(int i, atomic_t *v)                       \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op "     %w[i], %[v]\n"                                  \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v));                                                     \
@@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd)
 static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)    \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op #mb " %w[i], %w[i], %[v]"                             \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v)                                                       \
@@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v)   \
        u32 tmp;                                                        \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
        "       add     %w[i], %w[i], %w[tmp]"                          \
        : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
@@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN(        , al, "memory")
 static inline void __lse_atomic_and(int i, atomic_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       mvn     %w[i], %w[i]\n"
        "       stclr   %w[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v)
 static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)     \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       mvn     %w[i], %w[i]\n"                                 \
        "       ldclr" #mb "    %w[i], %w[i], %[v]"                     \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND(        , al, "memory")
 static inline void __lse_atomic_sub(int i, atomic_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       neg     %w[i], %w[i]\n"
        "       stadd   %w[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
        u32 tmp;                                                        \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
        "       add     %w[i], %w[i], %w[tmp]"                          \
@@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN(        , al, "memory")
 static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)     \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], %w[i], %[v]"                     \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB(        , al, "memory")
 static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)           \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op "     %[i], %[v]\n"                                   \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v));                                                     \
@@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd)
 static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op #mb " %[i], %[i], %[v]"                               \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v)                                                       \
@@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       ldadd" #mb "    %[i], %x[tmp], %[v]\n"                  \
        "       add     %[i], %[i], %x[tmp]"                            \
        : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
@@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN(        , al, "memory")
 static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       mvn     %[i], %[i]\n"
        "       stclr   %[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)        \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       mvn     %[i], %[i]\n"                                   \
        "       ldclr" #mb "    %[i], %[i], %[v]"                       \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
 static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       neg     %[i], %[i]\n"
        "       stadd   %[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], %x[tmp], %[v]\n"                  \
        "       add     %[i], %[i], %x[tmp]"                            \
@@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)        \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], %[i], %[v]"                       \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
        unsigned long tmp;
 
        asm volatile(
+       __LSE_PREAMBLE
        "1:     ldr     %x[tmp], %[v]\n"
        "       subs    %[ret], %x[tmp], #1\n"
        "       b.lt    2f\n"
@@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr,                  \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       mov     %" #w "[tmp], %" #w "[old]\n"                   \
        "       cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"    \
        "       mov     %" #w "[ret], %" #w "[tmp]"                     \
@@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1,                             \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr;      \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
        "       eor     %[old1], %[old1], %[oldval1]\n"                 \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \
index d064a50..8d2a7de 100644 (file)
@@ -35,6 +35,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 }
 #define ip_fast_csum ip_fast_csum
 
+extern unsigned int do_csum(const unsigned char *buff, int len);
+#define do_csum do_csum
+
 #include <asm-generic/checksum.h>
 
 #endif /* __ASM_CHECKSUM_H */
index d72d995..b4a4053 100644 (file)
@@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
        u32             reg_id_isar3;
        u32             reg_id_isar4;
        u32             reg_id_isar5;
+       u32             reg_id_isar6;
        u32             reg_id_mmfr0;
        u32             reg_id_mmfr1;
        u32             reg_id_mmfr2;
index b926838..865e025 100644 (file)
@@ -44,7 +44,7 @@
 #define ARM64_SSBS                             34
 #define ARM64_WORKAROUND_1418040               35
 #define ARM64_HAS_SB                           36
-#define ARM64_WORKAROUND_1165522               37
+#define ARM64_WORKAROUND_SPECULATIVE_AT_VHE    37
 #define ARM64_HAS_ADDRESS_AUTH_ARCH            38
 #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF         39
 #define ARM64_HAS_GENERIC_AUTH_ARCH            40
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM    45
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM   46
 #define ARM64_WORKAROUND_1542419               47
-#define ARM64_WORKAROUND_1319367               48
+#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE   48
+#define ARM64_HAS_E0PD                         49
+#define ARM64_HAS_RNG                          50
 
-#define ARM64_NCAPS                            49
+#define ARM64_NCAPS                            51
 
 #endif /* __ASM_CPUCAPS_H */
index 4261d55..92ef953 100644 (file)
@@ -613,6 +613,11 @@ static inline bool system_has_prio_mask_debugging(void)
               system_uses_irq_prio_masking();
 }
 
+static inline bool system_capabilities_finalized(void)
+{
+       return static_branch_likely(&arm64_const_caps_ready);
+}
+
 #define ARM64_BP_HARDEN_UNKNOWN                -1
 #define ARM64_BP_HARDEN_WA_NEEDED      0
 #define ARM64_BP_HARDEN_NOT_REQUIRED   1
index aca07c2..a87a93f 100644 (file)
@@ -85,6 +85,8 @@
 #define QCOM_CPU_PART_FALKOR_V1                0x800
 #define QCOM_CPU_PART_FALKOR           0xC00
 #define QCOM_CPU_PART_KRYO             0x200
+#define QCOM_CPU_PART_KRYO_3XX_SILVER  0x803
+#define QCOM_CPU_PART_KRYO_4XX_SILVER  0x805
 
 #define NVIDIA_CPU_PART_DENVER         0x003
 #define NVIDIA_CPU_PART_CARMEL         0x004
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
 #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
index 72acd2d..ec213b4 100644 (file)
@@ -38,7 +38,7 @@ static inline void local_daif_mask(void)
        trace_hardirqs_off();
 }
 
-static inline unsigned long local_daif_save(void)
+static inline unsigned long local_daif_save_flags(void)
 {
        unsigned long flags;
 
@@ -50,6 +50,15 @@ static inline unsigned long local_daif_save(void)
                        flags |= PSR_I_BIT;
        }
 
+       return flags;
+}
+
+static inline unsigned long local_daif_save(void)
+{
+       unsigned long flags;
+
+       flags = local_daif_save_flags();
+
        local_daif_mask();
 
        return flags;
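A sketch (not from the patch) of the pattern this split enables: callers can
observe the effective DAIF flags without masking anything, which is exactly
what the APEI SEA path further below now relies on:

    #include <asm/daifflags.h>

    static void example_probe_daif_state(void)
    {
            unsigned long flags;

            flags = local_daif_save_flags();        /* capture only, no masking */
            /* ... inspect or report the interrupt state here ... */
            local_daif_restore(flags);
    }
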
index 4d5f3b5..b87c6e2 100644 (file)
@@ -45,8 +45,8 @@ void do_sysinstr(unsigned int esr, struct pt_regs *regs);
 void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
 void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
 void do_cp15instr(unsigned int esr, struct pt_regs *regs);
-void el0_svc_handler(struct pt_regs *regs);
-void el0_svc_compat_handler(struct pt_regs *regs);
+void do_el0_svc(struct pt_regs *regs);
+void do_el0_svc_compat(struct pt_regs *regs);
 void do_el0_ia_bp_hardening(unsigned long addr,  unsigned int esr,
                            struct pt_regs *regs);
 
index 3d2f247..0f00265 100644 (file)
 #define KERNEL_HWCAP_SVESM4            __khwcap2_feature(SVESM4)
 #define KERNEL_HWCAP_FLAGM2            __khwcap2_feature(FLAGM2)
 #define KERNEL_HWCAP_FRINT             __khwcap2_feature(FRINT)
+#define KERNEL_HWCAP_SVEI8MM           __khwcap2_feature(SVEI8MM)
+#define KERNEL_HWCAP_SVEF32MM          __khwcap2_feature(SVEF32MM)
+#define KERNEL_HWCAP_SVEF64MM          __khwcap2_feature(SVEF64MM)
+#define KERNEL_HWCAP_SVEBF16           __khwcap2_feature(SVEBF16)
+#define KERNEL_HWCAP_I8MM              __khwcap2_feature(I8MM)
+#define KERNEL_HWCAP_BF16              __khwcap2_feature(BF16)
+#define KERNEL_HWCAP_DGH               __khwcap2_feature(DGH)
+#define KERNEL_HWCAP_RNG               __khwcap2_feature(RNG)
 
 /*
  * This yields a mask that user programs can use to figure out what
index 12a561a..d24b527 100644 (file)
@@ -96,6 +96,10 @@ static inline void crash_post_resume(void) {}
 struct kimage_arch {
        void *dtb;
        unsigned long dtb_mem;
+       /* Core ELF header buffer */
+       void *elf_headers;
+       unsigned long elf_headers_mem;
+       unsigned long elf_headers_sz;
 };
 
 extern const struct kexec_file_ops kexec_image_ops;
index c61260c..f5acdde 100644 (file)
@@ -547,7 +547,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
         * wrong, and hyp will crash and burn when it uses any
         * cpus_have_const_cap() wrapper.
         */
-       BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+       BUG_ON(!system_capabilities_finalized());
        __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
 
        /*
@@ -571,7 +571,7 @@ static inline bool kvm_arch_requires_vhe(void)
                return true;
 
        /* Some implementations have defects that confine them to VHE */
-       if (cpus_have_cap(ARM64_WORKAROUND_1165522))
+       if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE))
                return true;
 
        return false;
index 97f21cc..a3a6a2b 100644 (file)
@@ -91,11 +91,11 @@ static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
        write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
 
        /*
-        * ARM erratum 1165522 requires the actual execution of the above
-        * before we can switch to the EL1/EL0 translation regime used by
+        * ARM errata 1165522 and 1530923 require the actual execution of the
+        * above before we can switch to the EL1/EL0 translation regime used by
         * the guest.
         */
-       asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
+       asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
 }
 
 #endif /* __ARM64_KVM_HYP_H__ */
index 1b26629..ebee311 100644 (file)
@@ -4,4 +4,20 @@
 #define __ALIGN                .align 2
 #define __ALIGN_STR    ".align 2"
 
+/*
+ * Annotate a function as position independent, i.e., safe to be called before
+ * the kernel virtual mapping is activated.
+ */
+#define SYM_FUNC_START_PI(x)                   \
+               SYM_FUNC_START_ALIAS(__pi_##x); \
+               SYM_FUNC_START(x)
+
+#define SYM_FUNC_START_WEAK_PI(x)              \
+               SYM_FUNC_START_ALIAS(__pi_##x); \
+               SYM_FUNC_START_WEAK(x)
+
+#define SYM_FUNC_END_PI(x)                     \
+               SYM_FUNC_END(x);                \
+               SYM_FUNC_END_ALIAS(__pi_##x)
+
 #endif
index 80b3882..d429f77 100644 (file)
@@ -4,7 +4,9 @@
 
 #include <asm/atomic_ll_sc.h>
 
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+
+#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
 
 #include <linux/compiler_types.h>
 #include <linux/export.h>
@@ -14,8 +16,6 @@
 #include <asm/atomic_lse.h>
 #include <asm/cpucaps.h>
 
-__asm__(".arch_extension       lse");
-
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;
 
@@ -34,9 +34,9 @@ static inline bool system_uses_lse_atomics(void)
 
 /* In-line patching at runtime */
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)                               \
-       ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+       ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
 
-#else  /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#else  /* CONFIG_ARM64_LSE_ATOMICS */
 
 static inline bool system_uses_lse_atomics(void) { return false; }
 
@@ -44,5 +44,5 @@ static inline bool system_uses_lse_atomics(void) { return false; }
 
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)       llsc
 
-#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
 #endif /* __ASM_LSE_H */
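A minimal sketch (not in the patch) of what the new __LSE_PREAMBLE buys: a
single asm blob can opt in to the LSE extension even though the rest of the
translation unit is assembled for plain armv8-a:

    static inline void example_stadd(int i, int *counter)
    {
            asm volatile(
            __LSE_PREAMBLE
            "       stadd   %w[i], %[v]\n"
            : [v] "+Q" (*counter)
            : [i] "r" (i));
    }
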
index f217e32..e4d8624 100644 (file)
@@ -29,52 +29,11 @@ typedef struct {
  */
 #define ASID(mm)       ((mm)->context.id.counter & 0xffff)
 
-static inline bool arm64_kernel_unmapped_at_el0(void)
-{
-       return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
-              cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
-}
+extern bool arm64_use_ng_mappings;
 
-static inline bool arm64_kernel_use_ng_mappings(void)
+static inline bool arm64_kernel_unmapped_at_el0(void)
 {
-       bool tx1_bug;
-
-       /* What's a kpti? Use global mappings if we don't know. */
-       if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
-               return false;
-
-       /*
-        * Note: this function is called before the CPU capabilities have
-        * been configured, so our early mappings will be global. If we
-        * later determine that kpti is required, then
-        * kpti_install_ng_mappings() will make them non-global.
-        */
-       if (arm64_kernel_unmapped_at_el0())
-               return true;
-
-       if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-               return false;
-
-       /*
-        * KASLR is enabled so we're going to be enabling kpti on non-broken
-        * CPUs regardless of their susceptibility to Meltdown. Rather
-        * than force everybody to go through the G -> nG dance later on,
-        * just put down non-global mappings from the beginning.
-        */
-       if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
-               tx1_bug = false;
-#ifndef MODULE
-       } else if (!static_branch_likely(&arm64_const_caps_ready)) {
-               extern const struct midr_range cavium_erratum_27456_cpus[];
-
-               tx1_bug = is_midr_in_range_list(read_cpuid_id(),
-                                               cavium_erratum_27456_cpus);
-#endif
-       } else {
-               tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
-       }
-
-       return !tx1_bug && kaslr_offset() > 0;
+       return arm64_use_ng_mappings;
 }
 
 typedef void (*bp_hardening_cb_t)(void);
@@ -128,6 +87,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               pgprot_t prot, bool page_mappings_only);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
+extern bool kaslr_requires_kpti(void);
 
 #define INIT_MM_CONTEXT(name)  \
        .pgd = init_pg_dir,
index d9fbd43..6bf5e65 100644 (file)
 #define PUD_TABLE_BIT          (_AT(pudval_t, 1) << 1)
 #define PUD_TYPE_MASK          (_AT(pudval_t, 3) << 0)
 #define PUD_TYPE_SECT          (_AT(pudval_t, 1) << 0)
+#define PUD_SECT_RDONLY                (_AT(pudval_t, 1) << 7)         /* AP[2] */
 
 /*
  * Level 2 descriptor (PMD).
 #define TCR_HD                 (UL(1) << 40)
 #define TCR_NFD0               (UL(1) << 53)
 #define TCR_NFD1               (UL(1) << 54)
+#define TCR_E0PD0              (UL(1) << 55)
+#define TCR_E0PD1              (UL(1) << 56)
 
 /*
  * TTBR.
index baf52ba..6f87839 100644 (file)
@@ -26,8 +26,8 @@
 #define _PROT_DEFAULT          (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define _PROT_SECT_DEFAULT     (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PTE_MAYBE_NG           (arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
-#define PMD_MAYBE_NG           (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG           (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
+#define PMD_MAYBE_NG           (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
 
 #define PROT_DEFAULT           (_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT      (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
index 25a73aa..3994169 100644 (file)
@@ -8,7 +8,6 @@
 #include <asm-generic/sections.h>
 
 extern char __alt_instructions[], __alt_instructions_end[];
-extern char __exception_text_start[], __exception_text_end[];
 extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 extern char __hyp_text_start[], __hyp_text_end[];
index 7434844..89cba26 100644 (file)
@@ -26,6 +26,8 @@ DECLARE_PER_CPU(bool, fpsimd_context_busy);
 static __must_check inline bool may_use_simd(void)
 {
        /*
+        * We must make sure that SVE has been initialized properly
+        * before using SIMD in the kernel.
         * fpsimd_context_busy is only set while preemption is disabled,
         * and is clear whenever preemption is enabled. Since
         * this_cpu_read() is atomic w.r.t. preemption, fpsimd_context_busy
@@ -33,8 +35,10 @@ static __must_check inline bool may_use_simd(void)
         * migrated, and if it's clear we cannot be migrated to a CPU
         * where it is set.
         */
-       return !in_irq() && !irqs_disabled() && !in_nmi() &&
-               !this_cpu_read(fpsimd_context_busy);
+       return !WARN_ON(!system_capabilities_finalized()) &&
+              system_supports_fpsimd() &&
+              !in_irq() && !irqs_disabled() && !in_nmi() &&
+              !this_cpu_read(fpsimd_context_busy);
 }
 
 #else /* ! CONFIG_KERNEL_MODE_NEON */
index 6e919fa..b91570f 100644 (file)
 #define SYS_ID_ISAR4_EL1               sys_reg(3, 0, 0, 2, 4)
 #define SYS_ID_ISAR5_EL1               sys_reg(3, 0, 0, 2, 5)
 #define SYS_ID_MMFR4_EL1               sys_reg(3, 0, 0, 2, 6)
+#define SYS_ID_ISAR6_EL1               sys_reg(3, 0, 0, 2, 7)
 
 #define SYS_MVFR0_EL1                  sys_reg(3, 0, 0, 3, 0)
 #define SYS_MVFR1_EL1                  sys_reg(3, 0, 0, 3, 1)
 #define SYS_CTR_EL0                    sys_reg(3, 3, 0, 0, 1)
 #define SYS_DCZID_EL0                  sys_reg(3, 3, 0, 0, 7)
 
+#define SYS_RNDR_EL0                   sys_reg(3, 3, 2, 4, 0)
+#define SYS_RNDRRS_EL0                 sys_reg(3, 3, 2, 4, 1)
+
 #define SYS_PMCR_EL0                   sys_reg(3, 3, 9, 12, 0)
 #define SYS_PMCNTENSET_EL0             sys_reg(3, 3, 9, 12, 1)
 #define SYS_PMCNTENCLR_EL0             sys_reg(3, 3, 9, 12, 2)
                         SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
                         ENDIAN_SET_EL1 | SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
 
+/* MAIR_ELx memory attributes (used by Linux) */
+#define MAIR_ATTR_DEVICE_nGnRnE                UL(0x00)
+#define MAIR_ATTR_DEVICE_nGnRE         UL(0x04)
+#define MAIR_ATTR_DEVICE_GRE           UL(0x0c)
+#define MAIR_ATTR_NORMAL_NC            UL(0x44)
+#define MAIR_ATTR_NORMAL_WT            UL(0xbb)
+#define MAIR_ATTR_NORMAL               UL(0xff)
+#define MAIR_ATTR_MASK                 UL(0xff)
+
+/* Position the attr at the correct index */
+#define MAIR_ATTRIDX(attr, idx)                ((attr) << ((idx) * 8))
+
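Illustrative use of MAIR_ATTRIDX() (not from the patch): each attribute index
occupies one byte of MAIR_ELx, so a register value is composed by OR-ing
shifted attributes. The index assignments here are assumptions made for the
example, not definitions from this header:

    #define EXAMPLE_MT_DEVICE_nGnRnE        0       /* assumed index */
    #define EXAMPLE_MT_NORMAL               4       /* assumed index */

    #define EXAMPLE_MAIR_EL1 \
            (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, EXAMPLE_MT_DEVICE_nGnRnE) | \
             MAIR_ATTRIDX(MAIR_ATTR_NORMAL, EXAMPLE_MT_NORMAL))
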
 /* id_aa64isar0 */
+#define ID_AA64ISAR0_RNDR_SHIFT                60
 #define ID_AA64ISAR0_TS_SHIFT          52
 #define ID_AA64ISAR0_FHM_SHIFT         48
 #define ID_AA64ISAR0_DP_SHIFT          44
 #define ID_AA64ISAR0_AES_SHIFT         4
 
 /* id_aa64isar1 */
+#define ID_AA64ISAR1_I8MM_SHIFT                52
+#define ID_AA64ISAR1_DGH_SHIFT         48
+#define ID_AA64ISAR1_BF16_SHIFT                44
+#define ID_AA64ISAR1_SPECRES_SHIFT     40
 #define ID_AA64ISAR1_SB_SHIFT          36
 #define ID_AA64ISAR1_FRINTTS_SHIFT     32
 #define ID_AA64ISAR1_GPI_SHIFT         28
 #define ID_AA64PFR1_SSBS_PSTATE_INSNS  2
 
 /* id_aa64zfr0 */
+#define ID_AA64ZFR0_F64MM_SHIFT                56
+#define ID_AA64ZFR0_F32MM_SHIFT                52
+#define ID_AA64ZFR0_I8MM_SHIFT         44
 #define ID_AA64ZFR0_SM4_SHIFT          40
 #define ID_AA64ZFR0_SHA3_SHIFT         32
+#define ID_AA64ZFR0_BF16_SHIFT         20
 #define ID_AA64ZFR0_BITPERM_SHIFT      16
 #define ID_AA64ZFR0_AES_SHIFT          4
 #define ID_AA64ZFR0_SVEVER_SHIFT       0
 
+#define ID_AA64ZFR0_F64MM              0x1
+#define ID_AA64ZFR0_F32MM              0x1
+#define ID_AA64ZFR0_I8MM               0x1
+#define ID_AA64ZFR0_BF16               0x1
 #define ID_AA64ZFR0_SM4                        0x1
 #define ID_AA64ZFR0_SHA3               0x1
 #define ID_AA64ZFR0_BITPERM            0x1
 #define ID_AA64MMFR1_VMIDBITS_16       2
 
 /* id_aa64mmfr2 */
+#define ID_AA64MMFR2_E0PD_SHIFT                60
 #define ID_AA64MMFR2_FWB_SHIFT         40
 #define ID_AA64MMFR2_AT_SHIFT          32
 #define ID_AA64MMFR2_LVA_SHIFT         16
 #define ID_ISAR5_AES_SHIFT             4
 #define ID_ISAR5_SEVL_SHIFT            0
 
+#define ID_ISAR6_I8MM_SHIFT            24
+#define ID_ISAR6_BF16_SHIFT            20
+#define ID_ISAR6_SPECRES_SHIFT         16
+#define ID_ISAR6_SB_SHIFT              12
+#define ID_ISAR6_FHM_SHIFT             8
+#define ID_ISAR6_DP_SHIFT              4
+#define ID_ISAR6_JSCVT_SHIFT           0
+
 #define MVFR0_FPROUND_SHIFT            28
 #define MVFR0_FPSHVEC_SHIFT            24
 #define MVFR0_FPSQRT_SHIFT             20
index a1e7288..7752d93 100644 (file)
 #define HWCAP2_SVESM4          (1 << 6)
 #define HWCAP2_FLAGM2          (1 << 7)
 #define HWCAP2_FRINT           (1 << 8)
+#define HWCAP2_SVEI8MM         (1 << 9)
+#define HWCAP2_SVEF32MM                (1 << 10)
+#define HWCAP2_SVEF64MM                (1 << 11)
+#define HWCAP2_SVEBF16         (1 << 12)
+#define HWCAP2_I8MM            (1 << 13)
+#define HWCAP2_BF16            (1 << 14)
+#define HWCAP2_DGH             (1 << 15)
+#define HWCAP2_RNG             (1 << 16)
 
 #endif /* _UAPI__ASM_HWCAP_H */
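A userspace sketch (not part of the patch) for probing the new bits through
the auxiliary vector; getauxval() and AT_HWCAP2 are standard glibc/Linux APIs:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP2_RNG
    #define HWCAP2_RNG      (1 << 16)       /* value from the header above */
    #endif

    int main(void)
    {
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            printf("RNDR instructions: %s\n",
                   (hwcap2 & HWCAP2_RNG) ? "present" : "absent");
            return 0;
    }
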
index 3a58e9d..a100483 100644 (file)
@@ -274,7 +274,7 @@ int apei_claim_sea(struct pt_regs *regs)
        if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
                return err;
 
-       current_flags = arch_local_save_flags();
+       current_flags = local_daif_save_flags();
 
        /*
         * SEA can interrupt SError, mask it and describe this as an NMI so
index ca158be..7832b32 100644 (file)
@@ -618,7 +618,8 @@ static struct insn_emulation_ops setend_ops = {
 };
 
 /*
- * Invoked as late_initcall, since not needed before init spawned.
+ * Invoked as core_initcall, which guarantees that the instruction
+ * emulation is ready for userspace.
  */
 static int __init armv8_deprecated_init(void)
 {
index 6ea337d..32c7bf8 100644 (file)
@@ -42,11 +42,11 @@ ENTRY(__cpu_soft_restart)
        mov     x0, #HVC_SOFT_RESTART
        hvc     #0                              // no return
 
-1:     mov     x18, x1                         // entry
+1:     mov     x8, x1                          // entry
        mov     x0, x2                          // arg0
        mov     x1, x3                          // arg1
        mov     x2, x4                          // arg2
-       br      x18
+       br      x8
 ENDPROC(__cpu_soft_restart)
 
 .popsection
index 85f4bec..703ad0a 100644 (file)
@@ -548,6 +548,8 @@ static const struct midr_range spectre_v2_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
        { /* sentinel */ }
 };
 
@@ -757,6 +759,20 @@ static const struct arm64_cpu_capabilities erratum_843419_list[] = {
 };
 #endif
 
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+static const struct midr_range erratum_speculative_at_vhe_list[] = {
+#ifdef CONFIG_ARM64_ERRATUM_1165522
+       /* Cortex A76 r0p0 to r2p0 */
+       MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1530923
+       /* Cortex A55 r0p0 to r2p0 */
+       MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+#endif
+       {},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -883,12 +899,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
        },
 #endif
-#ifdef CONFIG_ARM64_ERRATUM_1165522
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
        {
-               /* Cortex-A76 r0p0 to r2p0 */
-               .desc = "ARM erratum 1165522",
-               .capability = ARM64_WORKAROUND_1165522,
-               ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+               .desc = "ARM errata 1165522, 1530923",
+               .capability = ARM64_WORKAROUND_SPECULATIVE_AT_VHE,
+               ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_vhe_list),
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1463225
@@ -925,7 +940,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1319367
        {
                .desc = "ARM erratum 1319367",
-               .capability = ARM64_WORKAROUND_1319367,
+               .capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE,
                ERRATA_MIDR_RANGE_LIST(ca57_a72),
        },
 #endif
index 04cf64e..0b67156 100644 (file)
@@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly;
 #define COMPAT_ELF_HWCAP_DEFAULT       \
                                (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
                                 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
-                                COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
-                                COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
-                                COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+                                COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
                                 COMPAT_HWCAP_LPAE)
 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
 unsigned int compat_elf_hwcap2 __read_mostly;
@@ -47,19 +45,23 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM6
 /* Need also bit for ARM64_CB_PATCH */
 DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
 
+bool arm64_use_ng_mappings = false;
+EXPORT_SYMBOL(arm64_use_ng_mappings);
+
 /*
  * Flag to indicate if we have computed the system wide
  * capabilities based on the boot time active CPUs. This
  * will be used to determine if a new booting CPU should
  * go through the verification process to make sure that it
  * supports the system capabilities, without using a hotplug
- * notifier.
+ * notifier. This is also used to decide if we could use
+ * the fast path for checking constant CPU caps.
  */
-static bool sys_caps_initialised;
-
-static inline void set_sys_caps_initialised(void)
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+static inline void finalize_system_capabilities(void)
 {
-       sys_caps_initialised = true;
+       static_branch_enable(&arm64_const_caps_ready);
 }
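
The static key replaces the plain boolean so that, once capabilities are finalized, checks compile down to a patched branch rather than a load. A minimal sketch of the consumer side, assuming the helper lives in <asm/cpufeature.h> as in mainline:

	static inline bool system_capabilities_finalized(void)
	{
		/* Patched to a constant branch once finalize_system_capabilities() runs. */
		return static_branch_likely(&arm64_const_caps_ready);
	}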
 
 static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
@@ -119,6 +121,7 @@ static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
  * sync with the documentation of the CPU feature register ABI.
  */
 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
@@ -135,6 +138,10 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
@@ -176,10 +183,18 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
@@ -225,6 +240,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
@@ -313,6 +329,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
        ARM64_FTR_END,
 };
 
+static const struct arm64_ftr_bits ftr_id_isar6[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
+       ARM64_FTR_END,
+};
+
 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),               /* State3 */
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),                /* State2 */
@@ -396,6 +423,7 @@ static const struct __ftr_reg_entry {
        ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
        ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
+       ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),
 
        /* Op1 = 0, CRn = 0, CRm = 3 */
        ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
@@ -600,6 +628,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
                init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
                init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
                init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+               init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
                init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
                init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
                init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
@@ -753,6 +782,8 @@ void update_cpu_features(int cpu,
                                        info->reg_id_isar4, boot->reg_id_isar4);
                taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
                                        info->reg_id_isar5, boot->reg_id_isar5);
+               taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
+                                       info->reg_id_isar6, boot->reg_id_isar6);
 
                /*
                 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
@@ -785,7 +816,7 @@ void update_cpu_features(int cpu,
 
                /* Probe vector lengths, unless we already gave up on SVE */
                if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
-                   !sys_caps_initialised)
+                   !system_capabilities_finalized())
                        sve_update_vq_map();
        }
 
@@ -831,6 +862,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
        read_sysreg_case(SYS_ID_ISAR3_EL1);
        read_sysreg_case(SYS_ID_ISAR4_EL1);
        read_sysreg_case(SYS_ID_ISAR5_EL1);
+       read_sysreg_case(SYS_ID_ISAR6_EL1);
        read_sysreg_case(SYS_MVFR0_EL1);
        read_sysreg_case(SYS_MVFR1_EL1);
        read_sysreg_case(SYS_MVFR2_EL1);
@@ -965,6 +997,46 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
        return has_cpuid_feature(entry, scope);
 }
 
+/*
+ * This check is triggered during early boot, before the cpufeature
+ * framework is initialised. Checking the status on the local CPU lets
+ * the boot CPU detect the need for non-global mappings and thus avoid
+ * a page-table rewrite after all the CPUs are booted. The check is run
+ * again on each individual CPU, so we reach a consistent state once
+ * the SMP CPUs are up and can switch to non-global mappings if
+ * required.
+ */
+bool kaslr_requires_kpti(void)
+{
+       if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+               return false;
+
+       /*
+        * E0PD does a similar job to KPTI so can be used instead
+        * where available.
+        */
+       if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+               u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+               if (cpuid_feature_extract_unsigned_field(mmfr2,
+                                               ID_AA64MMFR2_E0PD_SHIFT))
+                       return false;
+       }
+
+       /*
+        * Systems affected by Cavium erratum 27456 are incompatible
+        * with KPTI.
+        */
+       if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+               extern const struct midr_range cavium_erratum_27456_cpus[];
+
+               if (is_midr_in_range_list(read_cpuid_id(),
+                                         cavium_erratum_27456_cpus))
+                       return false;
+       }
+
+       return kaslr_offset() > 0;
+}
+
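kaslr_requires_kpti() reads the raw ID register rather than the sanitised copy because it can run before the cpufeature code is up. A sketch of the extraction helper it relies on (an assumption: standard 4-bit ID register fields, equivalent to the <asm/cpufeature.h> helper):

	static inline unsigned int
	cpuid_feature_extract_unsigned_field(u64 features, int field)
	{
		/* arm64 ID register fields are 4 bits wide: shift and mask. */
		return (unsigned int)(features >> field) & 0xf;
	}
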
 static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
@@ -975,6 +1047,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
        static const struct midr_range kpti_safe_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
                MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+               MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
@@ -1008,7 +1081,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
        }
 
        /* Useful for KASLR robustness */
-       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+       if (kaslr_requires_kpti()) {
                if (!__kpti_forced) {
                        str = "KASLR";
                        __kpti_forced = 1;
@@ -1043,7 +1116,6 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
        extern kpti_remap_fn idmap_kpti_install_ng_mappings;
        kpti_remap_fn *remap_fn;
 
-       static bool kpti_applied = false;
        int cpu = smp_processor_id();
 
        /*
@@ -1051,7 +1123,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
         * it already or we have KASLR enabled and therefore have not
         * created any global mappings at all.
         */
-       if (kpti_applied || kaslr_offset() > 0)
+       if (arm64_use_ng_mappings)
                return;
 
        remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
@@ -1061,7 +1133,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
        cpu_uninstall_idmap();
 
        if (!cpu)
-               kpti_applied = true;
+               arm64_use_ng_mappings = true;
 
        return;
 }
@@ -1251,6 +1323,14 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
+#ifdef CONFIG_ARM64_E0PD
+static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
+{
+       if (this_cpu_has_cap(ARM64_HAS_E0PD))
+               sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
+}
+#endif /* CONFIG_ARM64_E0PD */
+
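cpu_enable_e0pd() sets TCR_EL1.E0PD1 so that EL0 accesses to the upper (kernel) VA half fault without a page-table walk, which is what lets E0PD stand in for KPTI. sysreg_clear_set() is a read-modify-write helper; its shape is roughly (a sketch of the <asm/sysreg.h> macro):

	#define sysreg_clear_set(sysreg, clear, set) do {			\
		u64 __scs_val = read_sysreg(sysreg);				\
		u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);		\
		if (__scs_new != __scs_val)	/* skip redundant writes */	\
			write_sysreg(__scs_new, sysreg);			\
	} while (0)
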
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 static bool enable_pseudo_nmi;
 
@@ -1291,7 +1371,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .cpu_enable = cpu_enable_pan,
        },
 #endif /* CONFIG_ARM64_PAN */
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
        {
                .desc = "LSE atomic instructions",
                .capability = ARM64_HAS_LSE_ATOMICS,
@@ -1302,7 +1382,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .sign = FTR_UNSIGNED,
                .min_field_value = 2,
        },
-#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
        {
                .desc = "Software prefetching using PRFM",
                .capability = ARM64_HAS_NO_HW_PREFETCH,
@@ -1368,7 +1448,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                /* FP/SIMD is not implemented */
                .capability = ARM64_HAS_NO_FPSIMD,
-               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
                .min_field_value = 0,
                .matches = has_no_fpsimd,
        },
@@ -1566,6 +1646,31 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .sign = FTR_UNSIGNED,
                .min_field_value = 1,
        },
+#endif
+#ifdef CONFIG_ARM64_E0PD
+       {
+               .desc = "E0PD",
+               .capability = ARM64_HAS_E0PD,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .sys_reg = SYS_ID_AA64MMFR2_EL1,
+               .sign = FTR_UNSIGNED,
+               .field_pos = ID_AA64MMFR2_E0PD_SHIFT,
+               .matches = has_cpuid_feature,
+               .min_field_value = 1,
+               .cpu_enable = cpu_enable_e0pd,
+       },
+#endif
+#ifdef CONFIG_ARCH_RANDOM
+       {
+               .desc = "Random Number Generator",
+               .capability = ARM64_HAS_RNG,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64ISAR0_EL1,
+               .field_pos = ID_AA64ISAR0_RNDR_SHIFT,
+               .sign = FTR_UNSIGNED,
+               .min_field_value = 1,
+       },
 #endif
        {},
 };
@@ -1596,6 +1701,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .match_list = list,                                             \
        }
 
+#define HWCAP_CAP_MATCH(match, cap_type, cap)                                  \
+       {                                                                       \
+               __HWCAP_CAP(#cap, cap_type, cap)                                \
+               .matches = match,                                               \
+       }
+
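HWCAP_CAP_MATCH() lets a hwcap supply an arbitrary predicate in place of the usual single register/field comparison; the compat NEON entry added later in this diff uses it as:

	HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
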
 #ifdef CONFIG_ARM64_PTR_AUTH
 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
        {
@@ -1638,6 +1749,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
@@ -1651,6 +1763,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
        HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
@@ -1658,8 +1773,12 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
        HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
        HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
        HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
        HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
 #endif
        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
 #ifdef CONFIG_ARM64_PTR_AUTH
@@ -1669,8 +1788,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        {},
 };
 
+#ifdef CONFIG_COMPAT
+static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
+{
+       /*
+        * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
+        * matching the arm32 check in vfp_init(). We make the check
+        * future-proof by requiring the value to be non-zero.
+        */
+       u32 mvfr1;
+
+       WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+       if (scope == SCOPE_SYSTEM)
+               mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
+       else
+               mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
+
+       return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
+               cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
+               cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
+}
+#endif
+
 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
 #ifdef CONFIG_COMPAT
+       HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
+       HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
+       /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy-back on this for the presence of VFP support */
+       HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
+       HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
@@ -1974,7 +2120,7 @@ void check_local_cpu_capabilities(void)
         * Otherwise, this CPU should verify that it has all the system
         * advertised capabilities.
         */
-       if (!sys_caps_initialised)
+       if (!system_capabilities_finalized())
                update_cpu_capabilities(SCOPE_LOCAL_CPU);
        else
                verify_local_cpu_capabilities();
@@ -1988,14 +2134,6 @@ static void __init setup_boot_cpu_capabilities(void)
        enable_cpu_capabilities(SCOPE_BOOT_CPU);
 }
 
-DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
-EXPORT_SYMBOL(arm64_const_caps_ready);
-
-static void __init mark_const_caps_ready(void)
-{
-       static_branch_enable(&arm64_const_caps_ready);
-}
-
 bool this_cpu_has_cap(unsigned int n)
 {
        if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
@@ -2054,7 +2192,6 @@ void __init setup_cpu_features(void)
        u32 cwg;
 
        setup_system_capabilities();
-       mark_const_caps_ready();
        setup_elf_hwcaps(arm64_elf_hwcaps);
 
        if (system_supports_32bit_el0())
@@ -2067,7 +2204,7 @@ void __init setup_cpu_features(void)
        minsigstksz_setup();
 
        /* Advertise that we have computed the system capabilities */
-       set_sys_caps_initialised();
+       finalize_system_capabilities();
 
        /*
         * Check for sane CTR_EL0.CWG value.
index 56bba74..8613607 100644 (file)
@@ -84,6 +84,14 @@ static const char *const hwcap_str[] = {
        "svesm4",
        "flagm2",
        "frint",
+       "svei8mm",
+       "svef32mm",
+       "svef64mm",
+       "svebf16",
+       "i8mm",
+       "bf16",
+       "dgh",
+       "rng",
        NULL
 };
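
The hwcap_str[] entries must stay in HWCAP bit order, since /proc/cpuinfo prints them by walking the bit positions. Userspace sees the same bits via the auxiliary vector; a hedged sketch (the HWCAP2_RNG value is an assumption matching the uapi hwcap.h of this cycle):

	#include <stdbool.h>
	#include <sys/auxv.h>

	#ifndef HWCAP2_RNG
	#define HWCAP2_RNG	(1UL << 16)	/* assumed bit, see uapi/asm/hwcap.h */
	#endif

	static bool cpu_has_rng(void)
	{
		/* AT_HWCAP2 carries the arm64 hwcaps beyond the first set. */
		return getauxval(AT_HWCAP2) & HWCAP2_RNG;
	}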
 
@@ -360,6 +368,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
                info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
                info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
                info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+               info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
                info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
                info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
                info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
index 5dce5e5..fde5998 100644 (file)
@@ -36,14 +36,14 @@ static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
 }
 NOKPROBE_SYMBOL(el1_pc);
 
-static void el1_undef(struct pt_regs *regs)
+static void notrace el1_undef(struct pt_regs *regs)
 {
        local_daif_inherit(regs);
        do_undefinstr(regs);
 }
 NOKPROBE_SYMBOL(el1_undef);
 
-static void el1_inv(struct pt_regs *regs, unsigned long esr)
+static void notrace el1_inv(struct pt_regs *regs, unsigned long esr)
 {
        local_daif_inherit(regs);
        bad_mode(regs, 0, esr);
@@ -215,7 +215,7 @@ static void notrace el0_svc(struct pt_regs *regs)
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
-       el0_svc_handler(regs);
+       do_el0_svc(regs);
 }
 NOKPROBE_SYMBOL(el0_svc);
 
@@ -281,7 +281,7 @@ static void notrace el0_svc_compat(struct pt_regs *regs)
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
-       el0_svc_compat_handler(regs);
+       do_el0_svc_compat(regs);
 }
 NOKPROBE_SYMBOL(el0_svc_compat);
 
index 7c6a0a4..1b6b7a8 100644 (file)
        .macro kernel_ventry, el, label, regsize = 64
        .align 7
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
        .if     \el == 0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
        .if     \regsize == 64
        mrs     x30, tpidrro_el0
        msr     tpidrro_el0, xzr
        .else
        mov     x30, xzr
        .endif
-       .endif
 alternative_else_nop_endif
+       .endif
 #endif
 
        sub     sp, sp, #S_FRAME_SIZE
@@ -167,9 +167,13 @@ alternative_cb_end
        .if     \el == 0
        clear_gp_regs
        mrs     x21, sp_el0
-       ldr_this_cpu    tsk, __entry_task, x20  // Ensure MDSCR_EL1.SS is clear,
-       ldr     x19, [tsk, #TSK_TI_FLAGS]       // since we can unmask debug
-       disable_step_tsk x19, x20               // exceptions when scheduling.
+       ldr_this_cpu    tsk, __entry_task, x20
+       msr     sp_el0, tsk
+
+       // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+       // when scheduling.
+       ldr     x19, [tsk, #TSK_TI_FLAGS]
+       disable_step_tsk x19, x20
 
        apply_ssbd 1, x22, x23
 
@@ -232,13 +236,6 @@ alternative_else_nop_endif
        str     w21, [sp, #S_SYSCALLNO]
        .endif
 
-       /*
-        * Set sp_el0 to current thread_info.
-        */
-       .if     \el == 0
-       msr     sp_el0, tsk
-       .endif
-
        /* Save pmr */
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        mrs_s   x20, SYS_ICC_PMR_EL1
@@ -653,6 +650,7 @@ el0_sync:
        mov     x0, sp
        bl      el0_sync_handler
        b       ret_to_user
+ENDPROC(el0_sync)
 
 #ifdef CONFIG_COMPAT
        .align  6
@@ -661,16 +659,18 @@ el0_sync_compat:
        mov     x0, sp
        bl      el0_sync_compat_handler
        b       ret_to_user
-ENDPROC(el0_sync)
+ENDPROC(el0_sync_compat)
 
        .align  6
 el0_irq_compat:
        kernel_entry 0, 32
        b       el0_irq_naked
+ENDPROC(el0_irq_compat)
 
 el0_error_compat:
        kernel_entry 0, 32
        b       el0_error_naked
+ENDPROC(el0_error_compat)
 #endif
 
        .align  6
index 3eb338f..94289d1 100644 (file)
@@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task)
  */
 static void task_fpsimd_load(void)
 {
+       WARN_ON(!system_supports_fpsimd());
        WARN_ON(!have_cpu_fpsimd_context());
 
        if (system_supports_sve() && test_thread_flag(TIF_SVE))
@@ -289,6 +290,7 @@ static void fpsimd_save(void)
                this_cpu_ptr(&fpsimd_last_state);
        /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
 
+       WARN_ON(!system_supports_fpsimd());
        WARN_ON(!have_cpu_fpsimd_context());
 
        if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void)
        struct fpsimd_last_state_struct *last =
                this_cpu_ptr(&fpsimd_last_state);
 
+       WARN_ON(!system_supports_fpsimd());
        last->st = &current->thread.uw.fpsimd_state;
        last->sve_state = current->thread.sve_state;
        last->sve_vl = current->thread.sve_vl;
@@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
        struct fpsimd_last_state_struct *last =
                this_cpu_ptr(&fpsimd_last_state);
 
+       WARN_ON(!system_supports_fpsimd());
        WARN_ON(!in_softirq() && !irqs_disabled());
 
        last->st = st;
@@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
  */
 void fpsimd_restore_current_state(void)
 {
-       if (!system_supports_fpsimd())
+       /*
+        * For tasks created before we detected the absence of FP/SIMD
+        * (e.g. init), TIF_FOREIGN_FPSTATE could have been set via
+        * fpsimd_thread_switch() and then inherited by child processes.
+        * If we later detect that the system doesn't support FP/SIMD,
+        * we must clear the flag for all tasks to indicate that the
+        * FPSTATE is clean (as we can't have one), to avoid looping
+        * forever in do_notify_resume().
+        */
+       if (!system_supports_fpsimd()) {
+               clear_thread_flag(TIF_FOREIGN_FPSTATE);
                return;
+       }
 
        get_cpu_fpsimd_context();
 
@@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void)
  */
 void fpsimd_update_current_state(struct user_fpsimd_state const *state)
 {
-       if (!system_supports_fpsimd())
+       if (WARN_ON(!system_supports_fpsimd()))
                return;
 
        get_cpu_fpsimd_context();
@@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
 void fpsimd_flush_task_state(struct task_struct *t)
 {
        t->thread.fpsimd_cpu = NR_CPUS;
-
+       /*
+        * If we don't support fpsimd, bail out after we have
+        * reset the fpsimd_cpu for this task; there is no
+        * FPSTATE to mark stale.
+        */
+       if (!system_supports_fpsimd())
+               return;
        barrier();
        set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
 
@@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
  */
 static void fpsimd_flush_cpu_state(void)
 {
+       WARN_ON(!system_supports_fpsimd());
        __this_cpu_write(fpsimd_last_state.st, NULL);
        set_thread_flag(TIF_FOREIGN_FPSTATE);
 }
@@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void)
  */
 void fpsimd_save_and_flush_cpu_state(void)
 {
+       if (!system_supports_fpsimd())
+               return;
        WARN_ON(preemptible());
        __get_cpu_fpsimd_context();
        fpsimd_save();
index a96b292..590963c 100644 (file)
@@ -182,78 +182,79 @@ int arch_hibernation_header_restore(void *addr)
 }
 EXPORT_SYMBOL(arch_hibernation_header_restore);
 
-/*
- * Copies length bytes, starting at src_start into an new page,
- * perform cache maintentance, then maps it at the specified address low
- * address as executable.
- *
- * This is used by hibernate to copy the code it needs to execute when
- * overwriting the kernel text. This function generates a new set of page
- * tables, which it loads into ttbr0.
- *
- * Length is provided as we probably only want 4K of data, even on a 64K
- * page system.
- */
-static int create_safe_exec_page(void *src_start, size_t length,
-                                unsigned long dst_addr,
-                                phys_addr_t *phys_dst_addr,
-                                void *(*allocator)(gfp_t mask),
-                                gfp_t mask)
+static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
+                      unsigned long dst_addr,
+                      pgprot_t pgprot)
 {
-       int rc = 0;
-       pgd_t *trans_pgd;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
-       unsigned long dst = (unsigned long)allocator(mask);
-
-       if (!dst) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
-       memcpy((void *)dst, src_start, length);
-       __flush_icache_range(dst, dst + length);
-
-       trans_pgd = allocator(mask);
-       if (!trans_pgd) {
-               rc = -ENOMEM;
-               goto out;
-       }
 
        pgdp = pgd_offset_raw(trans_pgd, dst_addr);
        if (pgd_none(READ_ONCE(*pgdp))) {
-               pudp = allocator(mask);
-               if (!pudp) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
+               pudp = (void *)get_safe_page(GFP_ATOMIC);
+               if (!pudp)
+                       return -ENOMEM;
                pgd_populate(&init_mm, pgdp, pudp);
        }
 
        pudp = pud_offset(pgdp, dst_addr);
        if (pud_none(READ_ONCE(*pudp))) {
-               pmdp = allocator(mask);
-               if (!pmdp) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
+               pmdp = (void *)get_safe_page(GFP_ATOMIC);
+               if (!pmdp)
+                       return -ENOMEM;
                pud_populate(&init_mm, pudp, pmdp);
        }
 
        pmdp = pmd_offset(pudp, dst_addr);
        if (pmd_none(READ_ONCE(*pmdp))) {
-               ptep = allocator(mask);
-               if (!ptep) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
+               ptep = (void *)get_safe_page(GFP_ATOMIC);
+               if (!ptep)
+                       return -ENOMEM;
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }
 
        ptep = pte_offset_kernel(pmdp, dst_addr);
-       set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
+       set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));
+
+       return 0;
+}
+
+/*
+ * Copies length bytes, starting at src_start, into a new page,
+ * performs cache maintenance, then maps the page at the specified
+ * low address as executable.
+ *
+ * This is used by hibernate to copy the code it needs to execute when
+ * overwriting the kernel text. This function generates a new set of page
+ * tables, which it loads into ttbr0.
+ *
+ * Length is provided as we probably only want 4K of data, even on a 64K
+ * page system.
+ */
+static int create_safe_exec_page(void *src_start, size_t length,
+                                unsigned long dst_addr,
+                                phys_addr_t *phys_dst_addr)
+{
+       void *page = (void *)get_safe_page(GFP_ATOMIC);
+       pgd_t *trans_pgd;
+       int rc;
+
+       if (!page)
+               return -ENOMEM;
+
+       memcpy(page, src_start, length);
+       __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+
+       trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
+       if (!trans_pgd)
+               return -ENOMEM;
+
+       rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
+                               PAGE_KERNEL_EXEC);
+       if (rc)
+               return rc;
 
        /*
         * Load our new page tables. A strict BBM approach requires that we
@@ -269,13 +270,12 @@ static int create_safe_exec_page(void *src_start, size_t length,
         */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
-       write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
+       write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
        isb();
 
-       *phys_dst_addr = virt_to_phys((void *)dst);
+       *phys_dst_addr = virt_to_phys(page);
 
-out:
-       return rc;
+       return 0;
 }
 
 #define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
@@ -450,7 +450,7 @@ static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
                                return -ENOMEM;
                } else {
                        set_pud(dst_pudp,
-                               __pud(pud_val(pud) & ~PMD_SECT_RDONLY));
+                               __pud(pud_val(pud) & ~PUD_SECT_RDONLY));
                }
        } while (dst_pudp++, src_pudp++, addr = next, addr != end);
 
@@ -476,6 +476,24 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
        return 0;
 }
 
+static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
+                         unsigned long end)
+{
+       int rc;
+       pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+
+       if (!trans_pgd) {
+               pr_err("Failed to allocate memory for temporary page tables.\n");
+               return -ENOMEM;
+       }
+
+       rc = copy_page_tables(trans_pgd, start, end);
+       if (!rc)
+               *dst_pgdp = trans_pgd;
+
+       return rc;
+}
+
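With the allocator parameters gone, the hibernate entry point reduces to two calls against the trans_pgd helpers; the resulting flow, taken from the hunks that follow, is:

	/* Build a safe copy of the linear map, then the trampoline page. */
	rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
	if (rc)
		return rc;

	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit);
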
 /*
  * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
  *
@@ -484,7 +502,7 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
  */
 int swsusp_arch_resume(void)
 {
-       int rc = 0;
+       int rc;
        void *zero_page;
        size_t exit_size;
        pgd_t *tmp_pg_dir;
@@ -497,15 +515,9 @@ int swsusp_arch_resume(void)
         * Create a second copy of just the linear map, and use this when
         * restoring.
         */
-       tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-       if (!tmp_pg_dir) {
-               pr_err("Failed to allocate memory for temporary page tables.\n");
-               rc = -ENOMEM;
-               goto out;
-       }
-       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
+       rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
        if (rc)
-               goto out;
+               return rc;
 
        /*
         * We need a zero page that is zero before & after resume in order to
@@ -514,8 +526,7 @@ int swsusp_arch_resume(void)
        zero_page = (void *)get_safe_page(GFP_ATOMIC);
        if (!zero_page) {
                pr_err("Failed to allocate zero page.\n");
-               rc = -ENOMEM;
-               goto out;
+               return -ENOMEM;
        }
 
        /*
@@ -530,11 +541,10 @@ int swsusp_arch_resume(void)
         */
        rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
                                   (unsigned long)hibernate_exit,
-                                  &phys_hibernate_exit,
-                                  (void *)get_safe_page, GFP_ATOMIC);
+                                  &phys_hibernate_exit);
        if (rc) {
                pr_err("Failed to create safe executable page for hibernate_exit code.\n");
-               goto out;
+               return rc;
        }
 
        /*
@@ -561,8 +571,7 @@ int swsusp_arch_resume(void)
                       resume_hdr.reenter_kernel, restore_pblist,
                       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
 
-out:
-       return rc;
+       return 0;
 }
 
 int hibernate_resume_nonboot_cpu_disable(void)
index 2a11a96..53b8a4e 100644 (file)
@@ -120,6 +120,17 @@ u64 __init kaslr_early_init(u64 dt_phys)
                return 0;
        }
 
+       /*
+        * Mix in any entropy obtainable architecturally; this is
+        * open-coded since it runs extremely early.
+        */
+       if (__early_cpu_has_rndr()) {
+               unsigned long raw;
+
+               if (__arm64_rndr(&raw))
+                       seed ^= raw;
+       }
+
        if (!seed) {
                kaslr_status = KASLR_DISABLED_NO_SEED;
                return 0;
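
Both helpers come from the new <asm/archrandom.h>; since this path runs before alternatives are applied, the RNDR read uses a raw system-register encoding. A sketch under that assumption (s3_3_c2_c4_0 is the RNDR encoding; mainline wraps the mrs in a compatibility macro for older assemblers):

	static inline bool __arm64_rndr(unsigned long *v)
	{
		bool ok;

		/* RNDR sets PSTATE.Z on failure, so cset/ne yields 1 on success. */
		asm volatile("mrs %0, s3_3_c2_c4_0\n\tcset %w1, ne"
			     : "=r" (*v), "=r" (ok) : : "cc");
		return ok;
	}

	static inline bool __early_cpu_has_rndr(void)
	{
		/* Open-coded: the sanitised ID registers are not yet available. */
		u64 ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);

		return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
	}
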
index 29a9428..af9987c 100644 (file)
@@ -47,10 +47,6 @@ static void *image_load(struct kimage *image,
        struct kexec_segment *kernel_segment;
        int ret;
 
-       /* We don't support crash kernels yet. */
-       if (image->type == KEXEC_TYPE_CRASH)
-               return ERR_PTR(-EOPNOTSUPP);
-
        /*
         * We require a kernel with an unambiguous Image header. Per
         * Documentation/arm64/booting.rst, this is the case when image_size
index 0df8493..8e9c924 100644 (file)
@@ -160,18 +160,6 @@ void machine_kexec(struct kimage *kimage)
 
        kexec_image_info(kimage);
 
-       pr_debug("%s:%d: control_code_page:        %p\n", __func__, __LINE__,
-               kimage->control_code_page);
-       pr_debug("%s:%d: reboot_code_buffer_phys:  %pa\n", __func__, __LINE__,
-               &reboot_code_buffer_phys);
-       pr_debug("%s:%d: reboot_code_buffer:       %p\n", __func__, __LINE__,
-               reboot_code_buffer);
-       pr_debug("%s:%d: relocate_new_kernel:      %p\n", __func__, __LINE__,
-               arm64_relocate_new_kernel);
-       pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
-               __func__, __LINE__, arm64_relocate_new_kernel_size,
-               arm64_relocate_new_kernel_size);
-
        /*
         * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
         * after the kernel is shut down.
index 7b08bf9..dd3ae80 100644 (file)
 #include <linux/memblock.h>
 #include <linux/of_fdt.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <asm/byteorder.h>
 
 /* relevant device tree properties */
+#define FDT_PROP_KEXEC_ELFHDR  "linux,elfcorehdr"
+#define FDT_PROP_MEM_RANGE     "linux,usable-memory-range"
 #define FDT_PROP_INITRD_START  "linux,initrd-start"
 #define FDT_PROP_INITRD_END    "linux,initrd-end"
 #define FDT_PROP_BOOTARGS      "bootargs"
@@ -40,6 +43,10 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
        vfree(image->arch.dtb);
        image->arch.dtb = NULL;
 
+       vfree(image->arch.elf_headers);
+       image->arch.elf_headers = NULL;
+       image->arch.elf_headers_sz = 0;
+
        return kexec_image_post_load_cleanup_default(image);
 }
 
@@ -55,6 +62,31 @@ static int setup_dtb(struct kimage *image,
 
        off = ret;
 
+       ret = fdt_delprop(dtb, off, FDT_PROP_KEXEC_ELFHDR);
+       if (ret && ret != -FDT_ERR_NOTFOUND)
+               goto out;
+       ret = fdt_delprop(dtb, off, FDT_PROP_MEM_RANGE);
+       if (ret && ret != -FDT_ERR_NOTFOUND)
+               goto out;
+
+       if (image->type == KEXEC_TYPE_CRASH) {
+               /* add linux,elfcorehdr */
+               ret = fdt_appendprop_addrrange(dtb, 0, off,
+                               FDT_PROP_KEXEC_ELFHDR,
+                               image->arch.elf_headers_mem,
+                               image->arch.elf_headers_sz);
+               if (ret)
+                       return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
+
+               /* add linux,usable-memory-range */
+               ret = fdt_appendprop_addrrange(dtb, 0, off,
+                               FDT_PROP_MEM_RANGE,
+                               crashk_res.start,
+                               crashk_res.end - crashk_res.start + 1);
+               if (ret)
+                       return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
+       }
+
        /* add bootargs */
        if (cmdline) {
                ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline);
@@ -125,8 +157,8 @@ out:
 }
 
 /*
- * More space needed so that we can add initrd, bootargs, kaslr-seed, and
- * rng-seed.
+ * More space needed so that we can add initrd, bootargs, kaslr-seed,
+ * rng-seed, usable-memory-range and elfcorehdr.
  */
 #define DTB_EXTRA_SPACE 0x1000
 
@@ -174,6 +206,43 @@ static int create_dtb(struct kimage *image,
        }
 }
 
+static int prepare_elf_headers(void **addr, unsigned long *sz)
+{
+       struct crash_mem *cmem;
+       unsigned int nr_ranges;
+       int ret;
+       u64 i;
+       phys_addr_t start, end;
+
+       nr_ranges = 1; /* for exclusion of crashkernel region */
+       for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+                                       MEMBLOCK_NONE, &start, &end, NULL)
+               nr_ranges++;
+
+       cmem = kmalloc(sizeof(struct crash_mem) +
+                       sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL);
+       if (!cmem)
+               return -ENOMEM;
+
+       cmem->max_nr_ranges = nr_ranges;
+       cmem->nr_ranges = 0;
+       for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+                                       MEMBLOCK_NONE, &start, &end, NULL) {
+               cmem->ranges[cmem->nr_ranges].start = start;
+               cmem->ranges[cmem->nr_ranges].end = end - 1;
+               cmem->nr_ranges++;
+       }
+
+       /* Exclude crashkernel region */
+       ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+
+       if (!ret)
+               ret =  crash_prepare_elf64_headers(cmem, true, addr, sz);
+
+       kfree(cmem);
+       return ret;
+}
+
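The flexible-array allocation above could equally be written with the struct_size() helper from <linux/overflow.h>, which also guards the multiplication against overflow (a sketch, assuming ranges[] is the flexible tail of struct crash_mem):

	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;
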
 int load_other_segments(struct kimage *image,
                        unsigned long kernel_load_addr,
                        unsigned long kernel_size,
@@ -181,14 +250,43 @@ int load_other_segments(struct kimage *image,
                        char *cmdline)
 {
        struct kexec_buf kbuf;
-       void *dtb = NULL;
-       unsigned long initrd_load_addr = 0, dtb_len;
+       void *headers, *dtb = NULL;
+       unsigned long headers_sz, initrd_load_addr = 0, dtb_len;
        int ret = 0;
 
        kbuf.image = image;
        /* not allocate anything below the kernel */
        kbuf.buf_min = kernel_load_addr + kernel_size;
 
+       /* load elf core header */
+       if (image->type == KEXEC_TYPE_CRASH) {
+               ret = prepare_elf_headers(&headers, &headers_sz);
+               if (ret) {
+                       pr_err("Preparing elf core header failed\n");
+                       goto out_err;
+               }
+
+               kbuf.buffer = headers;
+               kbuf.bufsz = headers_sz;
+               kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+               kbuf.memsz = headers_sz;
+               kbuf.buf_align = SZ_64K; /* largest supported page size */
+               kbuf.buf_max = ULONG_MAX;
+               kbuf.top_down = true;
+
+               ret = kexec_add_buffer(&kbuf);
+               if (ret) {
+                       vfree(headers);
+                       goto out_err;
+               }
+               image->arch.elf_headers = headers;
+               image->arch.elf_headers_mem = kbuf.mem;
+               image->arch.elf_headers_sz = headers_sz;
+
+               pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+                        image->arch.elf_headers_mem, headers_sz, headers_sz);
+       }
+
        /* load initrd */
        if (initrd) {
                kbuf.buffer = initrd;
index d54586d..bbb0f0c 100644 (file)
@@ -646,6 +646,6 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void)
         * Only allow a task to be preempted once cpufeatures have been
         * enabled.
         */
-       if (static_branch_likely(&arm64_const_caps_ready))
+       if (system_capabilities_finalized())
                preempt_schedule_irq();
 }
index 6771c39..cd6e5fa 100644 (file)
@@ -615,6 +615,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
        return 0;
 }
 
+static int fpr_active(struct task_struct *target, const struct user_regset *regset)
+{
+       if (!system_supports_fpsimd())
+               return -ENODEV;
+       return regset->n;
+}
+
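The .active hook tells the regset core whether the regset exists for this task: a negative errno hides it (e.g. from core dumps and PTRACE_GETREGSET), while a positive return gives the member count. A sketch of how a caller consults it (an assumption based on the <linux/regset.h> semantics):

	static int regset_member_count(struct task_struct *target,
				       const struct user_regset *regset)
	{
		/* No hook means always active, with regset->n members. */
		return regset->active ? regset->active(target, regset) : regset->n;
	}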
 /*
  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
  */
@@ -637,6 +644,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
 {
+       if (!system_supports_fpsimd())
+               return -EINVAL;
+
        if (target == current)
                fpsimd_preserve_current_state();
 
@@ -676,6 +686,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 {
        int ret;
 
+       if (!system_supports_fpsimd())
+               return -EINVAL;
+
        ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
        if (ret)
                return ret;
@@ -1134,6 +1147,7 @@ static const struct user_regset aarch64_regsets[] = {
                 */
                .size = sizeof(u32),
                .align = sizeof(u32),
+               .active = fpr_active,
                .get = fpr_get,
                .set = fpr_set
        },
@@ -1348,6 +1362,9 @@ static int compat_vfp_get(struct task_struct *target,
        compat_ulong_t fpscr;
        int ret, vregs_end_pos;
 
+       if (!system_supports_fpsimd())
+               return -EINVAL;
+
        uregs = &target->thread.uw.fpsimd_state;
 
        if (target == current)
@@ -1381,6 +1398,9 @@ static int compat_vfp_set(struct task_struct *target,
        compat_ulong_t fpscr;
        int ret, vregs_end_pos;
 
+       if (!system_supports_fpsimd())
+               return -EINVAL;
+
        uregs = &target->thread.uw.fpsimd_state;
 
        vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
@@ -1438,6 +1458,7 @@ static const struct user_regset aarch32_regsets[] = {
                .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
+               .active = fpr_active,
                .get = compat_vfp_get,
                .set = compat_vfp_set
        },
index 56f6645..b6f9455 100644 (file)
@@ -285,6 +285,13 @@ void __init setup_arch(char **cmdline_p)
 
        *cmdline_p = boot_command_line;
 
+       /*
+        * If we know now that we are going to need KPTI, use non-global
+        * mappings from the start, avoiding the cost of rewriting
+        * everything later.
+        */
+       arm64_use_ng_mappings = kaslr_requires_kpti();
+
        early_fixmap_init();
        early_ioremap_init();
 
index dd2cdc0..339882d 100644 (file)
@@ -371,6 +371,8 @@ static int parse_user_sigframe(struct user_ctxs *user,
                        goto done;
 
                case FPSIMD_MAGIC:
+                       if (!system_supports_fpsimd())
+                               goto invalid;
                        if (user->fpsimd)
                                goto invalid;
 
@@ -506,7 +508,7 @@ static int restore_sigframe(struct pt_regs *regs,
        if (err == 0)
                err = parse_user_sigframe(&user, sf);
 
-       if (err == 0) {
+       if (err == 0 && system_supports_fpsimd()) {
                if (!user.fpsimd)
                        return -EINVAL;
 
@@ -623,7 +625,7 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
 
        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
 
-       if (err == 0) {
+       if (err == 0 && system_supports_fpsimd()) {
                struct fpsimd_context __user *fpsimd_ctx =
                        apply_user_offset(user, user->fpsimd_offset);
                err |= preserve_fpsimd_context(fpsimd_ctx);
index 12a5853..82feca6 100644 (file)
@@ -223,7 +223,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
        err |= !valid_user_regs(&regs->user_regs, current);
 
        aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
-       if (err == 0)
+       if (err == 0 && system_supports_fpsimd())
                err |= compat_restore_vfp_context(&aux->vfp);
 
        return err;
@@ -419,7 +419,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
 
        aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
 
-       if (err == 0)
+       if (err == 0 && system_supports_fpsimd())
                err |= compat_preserve_vfp_context(&aux->vfp);
        __put_user_error(0, &aux->end_magic, err);
 
index 52cfc61..b26955f 100644 (file)
@@ -37,7 +37,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
 
        /* Unsupported */
        if (state == ARM64_SSBD_UNKNOWN)
-               return -EINVAL;
+               return -ENODEV;
 
        /* Treat the unaffected/mitigated state separately */
        if (state == ARM64_SSBD_MITIGATED) {
@@ -102,7 +102,7 @@ static int ssbd_prctl_get(struct task_struct *task)
 {
        switch (arm64_get_ssbd_state()) {
        case ARM64_SSBD_UNKNOWN:
-               return -EINVAL;
+               return -ENODEV;
        case ARM64_SSBD_FORCE_ENABLE:
                return PR_SPEC_DISABLE;
        case ARM64_SSBD_KERNEL:
index 9a9d98a..a12c0c8 100644 (file)
@@ -154,14 +154,14 @@ static inline void sve_user_discard(void)
        sve_user_disable();
 }
 
-void el0_svc_handler(struct pt_regs *regs)
+void do_el0_svc(struct pt_regs *regs)
 {
        sve_user_discard();
        el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
 }
 
 #ifdef CONFIG_COMPAT
-void el0_svc_compat_handler(struct pt_regs *regs)
+void do_el0_svc_compat(struct pt_regs *regs)
 {
        el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
                       compat_sys_call_table);
index e5cc8d6..0c6832e 100644 (file)
        .text
        .pushsection    .hyp.text, "ax"
 
+/*
+ * We treat x18 as callee-saved as the host may use it as a platform
+ * register (e.g. for shadow call stack).
+ */
 .macro save_callee_saved_regs ctxt
+       str     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
@@ -32,6 +37,8 @@
 .endm
 
 .macro restore_callee_saved_regs ctxt
+       // We require \ctxt is not in x18-x28
+       ldr     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
@@ -48,7 +55,7 @@ ENTRY(__guest_enter)
        // x0: vcpu
        // x1: host context
        // x2-x17: clobbered by macros
-       // x18: guest context
+       // x29: guest context
 
        // Store the host regs
        save_callee_saved_regs x1
@@ -67,31 +74,28 @@ alternative_else_nop_endif
        ret
 
 1:
-       add     x18, x0, #VCPU_CONTEXT
+       add     x29, x0, #VCPU_CONTEXT
 
        // Macro ptrauth_switch_to_guest format:
        //      ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
        // The below macro to restore guest keys is not implemented in C code
        // as it may cause Pointer Authentication key signing mismatch errors
        // when this feature is enabled for kernel code.
-       ptrauth_switch_to_guest x18, x0, x1, x2
+       ptrauth_switch_to_guest x29, x0, x1, x2
 
        // Restore guest regs x0-x17
-       ldp     x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
-       ldp     x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
-       ldp     x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
-       ldp     x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
-       ldp     x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
-       ldp     x10, x11, [x18, #CPU_XREG_OFFSET(10)]
-       ldp     x12, x13, [x18, #CPU_XREG_OFFSET(12)]
-       ldp     x14, x15, [x18, #CPU_XREG_OFFSET(14)]
-       ldp     x16, x17, [x18, #CPU_XREG_OFFSET(16)]
-
-       // Restore guest regs x19-x29, lr
-       restore_callee_saved_regs x18
-
-       // Restore guest reg x18
-       ldr     x18,      [x18, #CPU_XREG_OFFSET(18)]
+       ldp     x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
+       ldp     x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
+       ldp     x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
+       ldp     x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
+       ldp     x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
+       ldp     x10, x11, [x29, #CPU_XREG_OFFSET(10)]
+       ldp     x12, x13, [x29, #CPU_XREG_OFFSET(12)]
+       ldp     x14, x15, [x29, #CPU_XREG_OFFSET(14)]
+       ldp     x16, x17, [x29, #CPU_XREG_OFFSET(16)]
+
+       // Restore guest regs x18-x29, lr
+       restore_callee_saved_regs x29
 
        // Do not touch any register after this!
        eret
@@ -114,7 +118,7 @@ ENTRY(__guest_exit)
        // Retrieve the guest regs x0-x1 from the stack
        ldp     x2, x3, [sp], #16       // x0, x1
 
-       // Store the guest regs x0-x1 and x4-x18
+       // Store the guest regs x0-x1 and x4-x17
        stp     x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
        stp     x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
        stp     x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
@@ -123,9 +127,8 @@ ENTRY(__guest_exit)
        stp     x12, x13, [x1, #CPU_XREG_OFFSET(12)]
        stp     x14, x15, [x1, #CPU_XREG_OFFSET(14)]
        stp     x16, x17, [x1, #CPU_XREG_OFFSET(16)]
-       str     x18,      [x1, #CPU_XREG_OFFSET(18)]
 
-       // Store the guest regs x19-x29, lr
+       // Store the guest regs x18-x29, lr
        save_callee_saved_regs x1
 
        get_host_ctxt   x2, x3
index 72fbbd8..dfe8dd1 100644 (file)
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+       /*
+        * When the system doesn't support FP/SIMD, we cannot rely on
+        * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
+        * abort on the very first access to FP and thus we should never
+        * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
+        * trap the accesses.
+        */
+       if (!system_supports_fpsimd() ||
+           vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
                vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
                                      KVM_ARM64_FP_HOST);
 
@@ -119,7 +127,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
        write_sysreg(val, cptr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
                isb();
@@ -158,11 +166,11 @@ static void deactivate_traps_vhe(void)
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 
        /*
-        * ARM erratum 1165522 requires the actual execution of the above
-        * before we can switch to the EL2/EL0 translation regime used by
+        * ARM errata 1165522 and 1530923 require the actual execution of the
+        * above before we can switch to the EL2/EL0 translation regime used by
         * the host.
         */
-       asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
+       asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
 
        write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
        write_sysreg(vectors, vbar_el1);
@@ -173,7 +181,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
        u64 mdcr_el2 = read_sysreg(mdcr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                u64 val;
 
                /*
index 22b8128..7672a97 100644 (file)
@@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
        write_sysreg(ctxt->sys_regs[MPIDR_EL1],         vmpidr_el2);
        write_sysreg(ctxt->sys_regs[CSSELR_EL1],        csselr_el1);
 
-       if (!cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],     SYS_SCTLR);
                write_sysreg_el1(ctxt->sys_regs[TCR_EL1],       SYS_TCR);
        } else  if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
        write_sysreg(ctxt->sys_regs[PAR_EL1],           par_el1);
        write_sysreg(ctxt->sys_regs[TPIDR_EL1],         tpidr_el1);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367) &&
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
            ctxt->__hyp_running_vcpu) {
                /*
                 * Must only be done for host registers, hence the context
index c2bc17c..92f560e 100644 (file)
@@ -23,10 +23,10 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 
        local_irq_save(cxt->flags);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
                /*
-                * For CPUs that are affected by ARM erratum 1165522, we
-                * cannot trust stage-1 to be in a correct state at that
+                * For CPUs that are affected by ARM errata 1165522 or 1530923,
+                * we cannot trust stage-1 to be in a correct state at that
                 * point. Since we do not want to force a full load of the
                 * vcpu state, we prevent the EL1 page-table walker from
                 * allocating new TLBs. This is done by setting the EPD bits
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
                                                  struct tlb_inv_context *cxt)
 {
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                u64 val;
 
                /*
@@ -103,7 +103,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        isb();
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
                /* Restore the registers to what they were */
                write_sysreg_el1(cxt->tcr, SYS_TCR);
                write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
@@ -117,7 +117,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 {
        write_sysreg(0, vttbr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                /* Ensure write of the host VMID */
                isb();
                /* Restore the host's TCR_EL1 */
index 9f21659..3e909b1 100644 (file)
@@ -1424,7 +1424,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        ID_SANITISED(ID_ISAR4_EL1),
        ID_SANITISED(ID_ISAR5_EL1),
        ID_SANITISED(ID_MMFR4_EL1),
-       ID_UNALLOCATED(2,7),
+       ID_SANITISED(ID_ISAR6_EL1),
 
        /* CRm=3 */
        ID_SANITISED(MVFR0_EL1),
index c21b936..2fc2534 100644 (file)
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 lib-y          := clear_user.o delay.o copy_from_user.o                \
                   copy_to_user.o copy_in_user.o copy_page.o            \
-                  clear_page.o memchr.o memcpy.o memmove.o memset.o    \
-                  memcmp.o strcmp.o strncmp.o strlen.o strnlen.o       \
-                  strchr.o strrchr.o tishift.o
+                  clear_page.o csum.o memchr.o memcpy.o memmove.o      \
+                  memset.o memcmp.o strcmp.o strncmp.o strlen.o        \
+                  strnlen.o strchr.o strrchr.o tishift.o
 
 ifeq ($(CONFIG_KERNEL_MODE_NEON), y)
 obj-$(CONFIG_XOR_BLOCKS)       += xor-neon.o
index 78a9ef6..073acbf 100644 (file)
@@ -14,7 +14,7 @@
  * Parameters:
  *     x0 - dest
  */
-ENTRY(clear_page)
+SYM_FUNC_START(clear_page)
        mrs     x1, dczid_el0
        and     w1, w1, #0xf
        mov     x2, #4
@@ -25,5 +25,5 @@ ENTRY(clear_page)
        tst     x0, #(PAGE_SIZE - 1)
        b.ne    1b
        ret
-ENDPROC(clear_page)
+SYM_FUNC_END(clear_page)
 EXPORT_SYMBOL(clear_page)
index aeafc03..48a3a26 100644 (file)
@@ -19,7 +19,7 @@
  *
  * Alignment fixed up by hardware.
  */
-ENTRY(__arch_clear_user)
+SYM_FUNC_START(__arch_clear_user)
        mov     x2, x1                  // save the size for fixup return
        subs    x1, x1, #8
        b.mi    2f
@@ -40,7 +40,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:     mov     x0, #0
        ret
-ENDPROC(__arch_clear_user)
+SYM_FUNC_END(__arch_clear_user)
 EXPORT_SYMBOL(__arch_clear_user)
 
        .section .fixup,"ax"
index ebb3c06..8e25e89 100644 (file)
        .endm
 
 end    .req    x5
-ENTRY(__arch_copy_from_user)
+SYM_FUNC_START(__arch_copy_from_user)
        add     end, x0, x2
 #include "copy_template.S"
        mov     x0, #0                          // Nothing to copy
        ret
-ENDPROC(__arch_copy_from_user)
+SYM_FUNC_END(__arch_copy_from_user)
 EXPORT_SYMBOL(__arch_copy_from_user)
 
        .section .fixup,"ax"
index 3d8153a..6671390 100644 (file)
 
 end    .req    x5
 
-ENTRY(__arch_copy_in_user)
+SYM_FUNC_START(__arch_copy_in_user)
        add     end, x0, x2
 #include "copy_template.S"
        mov     x0, #0
        ret
-ENDPROC(__arch_copy_in_user)
+SYM_FUNC_END(__arch_copy_in_user)
 EXPORT_SYMBOL(__arch_copy_in_user)
 
        .section .fixup,"ax"
index bbb8562..e7a7939 100644 (file)
@@ -17,7 +17,7 @@
  *     x0 - dest
  *     x1 - src
  */
-ENTRY(copy_page)
+SYM_FUNC_START(copy_page)
 alternative_if ARM64_HAS_NO_HW_PREFETCH
        // Prefetch three cache lines ahead.
        prfm    pldl1strm, [x1, #128]
@@ -34,46 +34,46 @@ alternative_else_nop_endif
        ldp     x14, x15, [x1, #96]
        ldp     x16, x17, [x1, #112]
 
-       mov     x18, #(PAGE_SIZE - 128)
+       add     x0, x0, #256
        add     x1, x1, #128
 1:
-       subs    x18, x18, #128
+       tst     x0, #(PAGE_SIZE - 1)
 
 alternative_if ARM64_HAS_NO_HW_PREFETCH
        prfm    pldl1strm, [x1, #384]
 alternative_else_nop_endif
 
-       stnp    x2, x3, [x0]
+       stnp    x2, x3, [x0, #-256]
        ldp     x2, x3, [x1]
-       stnp    x4, x5, [x0, #16]
+       stnp    x4, x5, [x0, #16 - 256]
        ldp     x4, x5, [x1, #16]
-       stnp    x6, x7, [x0, #32]
+       stnp    x6, x7, [x0, #32 - 256]
        ldp     x6, x7, [x1, #32]
-       stnp    x8, x9, [x0, #48]
+       stnp    x8, x9, [x0, #48 - 256]
        ldp     x8, x9, [x1, #48]
-       stnp    x10, x11, [x0, #64]
+       stnp    x10, x11, [x0, #64 - 256]
        ldp     x10, x11, [x1, #64]
-       stnp    x12, x13, [x0, #80]
+       stnp    x12, x13, [x0, #80 - 256]
        ldp     x12, x13, [x1, #80]
-       stnp    x14, x15, [x0, #96]
+       stnp    x14, x15, [x0, #96 - 256]
        ldp     x14, x15, [x1, #96]
-       stnp    x16, x17, [x0, #112]
+       stnp    x16, x17, [x0, #112 - 256]
        ldp     x16, x17, [x1, #112]
 
        add     x0, x0, #128
        add     x1, x1, #128
 
-       b.gt    1b
+       b.ne    1b
 
-       stnp    x2, x3, [x0]
-       stnp    x4, x5, [x0, #16]
-       stnp    x6, x7, [x0, #32]
-       stnp    x8, x9, [x0, #48]
-       stnp    x10, x11, [x0, #64]
-       stnp    x12, x13, [x0, #80]
-       stnp    x14, x15, [x0, #96]
-       stnp    x16, x17, [x0, #112]
+       stnp    x2, x3, [x0, #-256]
+       stnp    x4, x5, [x0, #16 - 256]
+       stnp    x6, x7, [x0, #32 - 256]
+       stnp    x8, x9, [x0, #48 - 256]
+       stnp    x10, x11, [x0, #64 - 256]
+       stnp    x12, x13, [x0, #80 - 256]
+       stnp    x14, x15, [x0, #96 - 256]
+       stnp    x16, x17, [x0, #112 - 256]
 
        ret
-ENDPROC(copy_page)
+SYM_FUNC_END(copy_page)
 EXPORT_SYMBOL(copy_page)
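
The restructured loop above frees x18 (the platform register, which other in-flight arm64 work wants reserved, e.g. for the shadow call stack): instead of counting down a scratch register, it tests the destination pointer against the page mask, and the stores trail that pointer by 256 bytes. A rough standalone C analogue of the new control flow, as a sketch only (buf stands in for the x2-x17 register block; this is not the kernel's code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* dst and src must both be PAGE_SIZE-aligned, as for copy_page */
static void copy_page_sketch(unsigned char *dst, const unsigned char *src)
{
	unsigned char buf[128];			/* stands in for x2-x17 */

	memcpy(buf, src, 128);			/* prologue: first block "loaded" */
	dst += 256;				/* dst runs ahead of the data... */
	src += 128;
	for (;;) {
		/* ...so hitting the page boundary means one block remains */
		int last = !((uintptr_t)dst & (PAGE_SIZE - 1));

		memcpy(dst - 256, buf, 128);	/* store previously loaded block */
		memcpy(buf, src, 128);		/* load the next one */
		dst += 128;
		src += 128;
		if (last)
			break;
	}
	memcpy(dst - 256, buf, 128);		/* epilogue: flush the last block */
}

static unsigned char s[PAGE_SIZE] __attribute__((aligned(4096)));
static unsigned char d[PAGE_SIZE] __attribute__((aligned(4096)));

int main(void)
{
	size_t i;

	for (i = 0; i < PAGE_SIZE; i++)
		s[i] = (unsigned char)i;
	copy_page_sketch(d, s);
	printf("%s\n", memcmp(d, s, PAGE_SIZE) ? "mismatch" : "ok");
	return 0;
}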
index 357eae2..1a104d0 100644 (file)
        .endm
 
 end    .req    x5
-ENTRY(__arch_copy_to_user)
+SYM_FUNC_START(__arch_copy_to_user)
        add     end, x0, x2
 #include "copy_template.S"
        mov     x0, #0
        ret
-ENDPROC(__arch_copy_to_user)
+SYM_FUNC_END(__arch_copy_to_user)
 EXPORT_SYMBOL(__arch_copy_to_user)
 
        .section .fixup,"ax"
index e6135f1..243e107 100644 (file)
@@ -85,17 +85,17 @@ CPU_BE(     rev16           w3, w3          )
        .endm
 
        .align          5
-ENTRY(crc32_le)
+SYM_FUNC_START(crc32_le)
 alternative_if_not ARM64_HAS_CRC32
        b               crc32_le_base
 alternative_else_nop_endif
        __crc32
-ENDPROC(crc32_le)
+SYM_FUNC_END(crc32_le)
 
        .align          5
-ENTRY(__crc32c_le)
+SYM_FUNC_START(__crc32c_le)
 alternative_if_not ARM64_HAS_CRC32
        b               __crc32c_le_base
 alternative_else_nop_endif
        __crc32         c
-ENDPROC(__crc32c_le)
+SYM_FUNC_END(__crc32c_le)
diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
new file mode 100644 (file)
index 0000000..1f82c66
--- /dev/null
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 Arm Ltd.
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kernel.h>
+
+#include <net/checksum.h>
+
+/* Looks dumb, but generates nice-ish code */
+static u64 accumulate(u64 sum, u64 data)
+{
+       __uint128_t tmp = (__uint128_t)sum + data;
+       return tmp + (tmp >> 64);
+}
+
+unsigned int do_csum(const unsigned char *buff, int len)
+{
+       unsigned int offset, shift, sum;
+       const u64 *ptr;
+       u64 data, sum64 = 0;
+
+       if (unlikely(len == 0))
+               return 0;
+
+       offset = (unsigned long)buff & 7;
+       /*
+        * This is to all intents and purposes safe, since rounding down cannot
+        * result in a different page or cache line being accessed, and @buff
+        * should absolutely not be pointing to anything read-sensitive. We do,
+        * however, have to be careful not to piss off KASAN, which means using
+        * unchecked reads to accommodate the head and tail, for which we'll
+        * compensate with an explicit check up-front.
+        */
+       kasan_check_read(buff, len);
+       ptr = (u64 *)(buff - offset);
+       len = len + offset - 8;
+
+       /*
+        * Head: zero out any excess leading bytes. Shifting back by the same
+        * amount should be at least as fast as any other way of handling the
+        * odd/even alignment, and means we can ignore it until the very end.
+        */
+       shift = offset * 8;
+       data = READ_ONCE_NOCHECK(*ptr++);
+#ifdef __LITTLE_ENDIAN
+       data = (data >> shift) << shift;
+#else
+       data = (data << shift) >> shift;
+#endif
+
+       /*
+        * Body: straightforward aligned loads from here on (the paired loads
+        * underlying the quadword type still only need dword alignment). The
+        * main loop strictly excludes the tail, so the second loop will always
+        * run at least once.
+        */
+       while (unlikely(len > 64)) {
+               __uint128_t tmp1, tmp2, tmp3, tmp4;
+
+               tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+               tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
+               tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
+               tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+
+               len -= 64;
+               ptr += 8;
+
+               /* This is the "don't dump the carry flag into a GPR" idiom */
+               tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+               tmp2 += (tmp2 >> 64) | (tmp2 << 64);
+               tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+               tmp4 += (tmp4 >> 64) | (tmp4 << 64);
+               tmp1 = ((tmp1 >> 64) << 64) | (tmp2 >> 64);
+               tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+               tmp3 = ((tmp3 >> 64) << 64) | (tmp4 >> 64);
+               tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+               tmp1 = ((tmp1 >> 64) << 64) | (tmp3 >> 64);
+               tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+               tmp1 = ((tmp1 >> 64) << 64) | sum64;
+               tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+               sum64 = tmp1 >> 64;
+       }
+       while (len > 8) {
+               __uint128_t tmp;
+
+               sum64 = accumulate(sum64, data);
+               tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+
+               len -= 16;
+               ptr += 2;
+
+#ifdef __LITTLE_ENDIAN
+               data = tmp >> 64;
+               sum64 = accumulate(sum64, tmp);
+#else
+               data = tmp;
+               sum64 = accumulate(sum64, tmp >> 64);
+#endif
+       }
+       if (len > 0) {
+               sum64 = accumulate(sum64, data);
+               data = READ_ONCE_NOCHECK(*ptr);
+               len -= 8;
+       }
+       /*
+        * Tail: zero any over-read bytes similarly to the head, again
+        * preserving odd/even alignment.
+        */
+       shift = len * -8;
+#ifdef __LITTLE_ENDIAN
+       data = (data << shift) >> shift;
+#else
+       data = (data >> shift) << shift;
+#endif
+       sum64 = accumulate(sum64, data);
+
+       /* Finally, folding */
+       sum64 += (sum64 >> 32) | (sum64 << 32);
+       sum = sum64 >> 32;
+       sum += (sum >> 16) | (sum << 16);
+       if (offset & 1)
+               return (u16)swab32(sum);
+
+       return sum >> 16;
+}
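
The new C do_csum() above leans on one identity: in a ones'-complement (Internet-style) checksum, a carry out of any addition can be added back into the low bits without changing the final 16-bit result, so the sum can live in a wide accumulator and be folded 64 -> 32 -> 16 only at the end. A standalone userspace sketch of accumulate() and the final fold (GCC/Clang __int128, as in the kernel; the data values are arbitrary):

#include <stdint.h>
#include <stdio.h>

typedef unsigned __int128 u128;	/* GCC/Clang extension */

static uint64_t accumulate(uint64_t sum, uint64_t data)
{
	u128 tmp = (u128)sum + data;

	return (uint64_t)(tmp + (tmp >> 64));	/* add the carry back in */
}

static uint16_t fold16(uint64_t sum64)
{
	uint32_t sum;

	sum64 += (sum64 >> 32) | (sum64 << 32);	/* 64 -> 32: carries land in the high half */
	sum = sum64 >> 32;
	sum += (sum >> 16) | (sum << 16);	/* 32 -> 16, same trick */
	return sum >> 16;
}

int main(void)
{
	uint64_t words[] = { 0xffffffffffffffffULL, 0x1ULL };
	uint64_t sum64 = 0;
	uint32_t naive = 0;
	int i, j;

	for (i = 0; i < 2; i++)
		sum64 = accumulate(sum64, words[i]);

	/* reference: classic 16-bit end-around-carry sum of the same data */
	for (i = 0; i < 2; i++)
		for (j = 0; j < 4; j++) {
			naive += (uint16_t)(words[i] >> (16 * j));
			naive = (naive & 0xffff) + (naive >> 16);
		}

	printf("folded: %#x, naive: %#x\n", fold16(sum64), naive);	/* both 0x1 */
	return 0;
}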
index 48a3ab6..edf6b97 100644 (file)
@@ -19,7 +19,7 @@
  * Returns:
  *     x0 - address of first occurrence of 'c' or 0
  */
-WEAK(memchr)
+SYM_FUNC_START_WEAK_PI(memchr)
        and     w1, w1, #0xff
 1:     subs    x2, x2, #1
        b.mi    2f
@@ -30,5 +30,5 @@ WEAK(memchr)
        ret
 2:     mov     x0, #0
        ret
-ENDPIPROC(memchr)
+SYM_FUNC_END_PI(memchr)
 EXPORT_SYMBOL_NOKASAN(memchr)
index b297bda..c0671e7 100644 (file)
@@ -46,7 +46,7 @@ pos           .req    x11
 limit_wd       .req    x12
 mask           .req    x13
 
-WEAK(memcmp)
+SYM_FUNC_START_WEAK_PI(memcmp)
        cbz     limit, .Lret0
        eor     tmp1, src1, src2
        tst     tmp1, #7
@@ -243,5 +243,5 @@ CPU_LE( rev data2, data2 )
 .Lret0:
        mov     result, #0
        ret
-ENDPIPROC(memcmp)
+SYM_FUNC_END_PI(memcmp)
 EXPORT_SYMBOL_NOKASAN(memcmp)
index d79f489..9f382ad 100644 (file)
        .endm
 
        .weak memcpy
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_PI(memcpy)
 #include "copy_template.S"
        ret
-ENDPIPROC(memcpy)
+SYM_FUNC_END_PI(memcpy)
 EXPORT_SYMBOL(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(__memcpy)
index 7847751..02cda2e 100644 (file)
@@ -46,8 +46,8 @@ D_l   .req    x13
 D_h    .req    x14
 
        .weak memmove
-ENTRY(__memmove)
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(__memmove)
+SYM_FUNC_START_PI(memmove)
        cmp     dstin, src
        b.lo    __memcpy
        add     tmp1, src, count
@@ -184,7 +184,7 @@ ENTRY(memmove)
        tst     count, #0x3f
        b.ne    .Ltail63
        ret
-ENDPIPROC(memmove)
+SYM_FUNC_END_PI(memmove)
 EXPORT_SYMBOL(memmove)
-ENDPROC(__memmove)
+SYM_FUNC_END_ALIAS(__memmove)
 EXPORT_SYMBOL(__memmove)
index 9fb97e6..77c3c7b 100644 (file)
@@ -43,8 +43,8 @@ tmp3w         .req    w9
 tmp3           .req    x9
 
        .weak memset
-ENTRY(__memset)
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(__memset)
+SYM_FUNC_START_PI(memset)
        mov     dst, dstin      /* Preserve return value.  */
        and     A_lw, val, #255
        orr     A_lw, A_lw, A_lw, lsl #8
@@ -203,7 +203,7 @@ ENTRY(memset)
        ands    count, count, zva_bits_x
        b.ne    .Ltail_maybe_long
        ret
-ENDPIPROC(memset)
+SYM_FUNC_END_PI(memset)
 EXPORT_SYMBOL(memset)
-ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(__memset)
 EXPORT_SYMBOL(__memset)
index ca3ec18..1f47eae 100644 (file)
@@ -18,7 +18,7 @@
  * Returns:
  *     x0 - address of first occurrence of 'c' or 0
  */
-WEAK(strchr)
+SYM_FUNC_START_WEAK(strchr)
        and     w1, w1, #0xff
 1:     ldrb    w2, [x0], #1
        cmp     w2, w1
@@ -28,5 +28,5 @@ WEAK(strchr)
        cmp     w2, w1
        csel    x0, x0, xzr, eq
        ret
-ENDPROC(strchr)
+SYM_FUNC_END(strchr)
 EXPORT_SYMBOL_NOKASAN(strchr)
index e9aefbe..4767540 100644 (file)
@@ -48,7 +48,7 @@ tmp3          .req    x9
 zeroones       .req    x10
 pos            .req    x11
 
-WEAK(strcmp)
+SYM_FUNC_START_WEAK_PI(strcmp)
        eor     tmp1, src1, src2
        mov     zeroones, #REP8_01
        tst     tmp1, #7
@@ -219,5 +219,5 @@ CPU_BE(     orr     syndrome, diff, has_nul )
        lsr     data1, data1, #56
        sub     result, data1, data2, lsr #56
        ret
-ENDPIPROC(strcmp)
+SYM_FUNC_END_PI(strcmp)
 EXPORT_SYMBOL_NOKASAN(strcmp)
index 87b0cb0..ee3ed88 100644 (file)
@@ -44,7 +44,7 @@ pos           .req    x12
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
-WEAK(strlen)
+SYM_FUNC_START_WEAK_PI(strlen)
        mov     zeroones, #REP8_01
        bic     src, srcin, #15
        ands    tmp1, srcin, #15
@@ -111,5 +111,5 @@ CPU_LE( lsr tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
        csinv   data1, data1, xzr, le
        csel    data2, data2, data2a, le
        b       .Lrealigned
-ENDPIPROC(strlen)
+SYM_FUNC_END_PI(strlen)
 EXPORT_SYMBOL_NOKASAN(strlen)
index f571581..2a7ee94 100644 (file)
@@ -52,7 +52,7 @@ limit_wd      .req    x13
 mask           .req    x14
 endloop                .req    x15
 
-WEAK(strncmp)
+SYM_FUNC_START_WEAK_PI(strncmp)
        cbz     limit, .Lret0
        eor     tmp1, src1, src2
        mov     zeroones, #REP8_01
@@ -295,5 +295,5 @@ CPU_BE( orr syndrome, diff, has_nul )
 .Lret0:
        mov     result, #0
        ret
-ENDPIPROC(strncmp)
+SYM_FUNC_END_PI(strncmp)
 EXPORT_SYMBOL_NOKASAN(strncmp)
index c0bac94..b72913a 100644 (file)
@@ -47,7 +47,7 @@ limit_wd      .req    x14
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
-WEAK(strnlen)
+SYM_FUNC_START_WEAK_PI(strnlen)
        cbz     limit, .Lhit_limit
        mov     zeroones, #REP8_01
        bic     src, srcin, #15
@@ -156,5 +156,5 @@ CPU_LE( lsr tmp2, tmp2, tmp4 )      /* Shift (tmp1 & 63).  */
 .Lhit_limit:
        mov     len, limit
        ret
-ENDPIPROC(strnlen)
+SYM_FUNC_END_PI(strnlen)
 EXPORT_SYMBOL_NOKASAN(strnlen)
index 794ac49..13132d1 100644 (file)
@@ -18,7 +18,7 @@
  * Returns:
  *     x0 - address of last occurrence of 'c' or 0
  */
-WEAK(strrchr)
+SYM_FUNC_START_WEAK_PI(strrchr)
        mov     x3, #0
        and     w1, w1, #0xff
 1:     ldrb    w2, [x0], #1
@@ -29,5 +29,5 @@ WEAK(strrchr)
        b       1b
 2:     mov     x0, x3
        ret
-ENDPIPROC(strrchr)
+SYM_FUNC_END_PI(strrchr)
 EXPORT_SYMBOL_NOKASAN(strrchr)
index 0476225..a886138 100644 (file)
@@ -7,7 +7,7 @@
 
 #include <asm/assembler.h>
 
-ENTRY(__ashlti3)
+SYM_FUNC_START(__ashlti3)
        cbz     x2, 1f
        mov     x3, #64
        sub     x3, x3, x2
@@ -26,10 +26,10 @@ ENTRY(__ashlti3)
        lsl     x1, x0, x1
        mov     x0, x2
        ret
-ENDPROC(__ashlti3)
+SYM_FUNC_END(__ashlti3)
 EXPORT_SYMBOL(__ashlti3)
 
-ENTRY(__ashrti3)
+SYM_FUNC_START(__ashrti3)
        cbz     x2, 1f
        mov     x3, #64
        sub     x3, x3, x2
@@ -48,10 +48,10 @@ ENTRY(__ashrti3)
        asr     x0, x1, x0
        mov     x1, x2
        ret
-ENDPROC(__ashrti3)
+SYM_FUNC_END(__ashrti3)
 EXPORT_SYMBOL(__ashrti3)
 
-ENTRY(__lshrti3)
+SYM_FUNC_START(__lshrti3)
        cbz     x2, 1f
        mov     x3, #64
        sub     x3, x3, x2
@@ -70,5 +70,5 @@ ENTRY(__lshrti3)
        lsr     x0, x1, x0
        mov     x1, x2
        ret
-ENDPROC(__lshrti3)
+SYM_FUNC_END(__lshrti3)
 EXPORT_SYMBOL(__lshrti3)
index db767b0..2d881f3 100644 (file)
@@ -24,7 +24,7 @@
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-ENTRY(__flush_icache_range)
+SYM_FUNC_START(__flush_icache_range)
        /* FALLTHROUGH */
 
 /*
@@ -37,7 +37,7 @@ ENTRY(__flush_icache_range)
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-ENTRY(__flush_cache_user_range)
+SYM_FUNC_START(__flush_cache_user_range)
        uaccess_ttbr0_enable x2, x3, x4
 alternative_if ARM64_HAS_CACHE_IDC
        dsb     ishst
@@ -66,8 +66,8 @@ alternative_else_nop_endif
 9:
        mov     x0, #-EFAULT
        b       1b
-ENDPROC(__flush_icache_range)
-ENDPROC(__flush_cache_user_range)
+SYM_FUNC_END(__flush_icache_range)
+SYM_FUNC_END(__flush_cache_user_range)
 
 /*
  *     invalidate_icache_range(start,end)
@@ -77,7 +77,7 @@ ENDPROC(__flush_cache_user_range)
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-ENTRY(invalidate_icache_range)
+SYM_FUNC_START(invalidate_icache_range)
 alternative_if ARM64_HAS_CACHE_DIC
        mov     x0, xzr
        isb
@@ -94,7 +94,7 @@ alternative_else_nop_endif
 2:
        mov     x0, #-EFAULT
        b       1b
-ENDPROC(invalidate_icache_range)
+SYM_FUNC_END(invalidate_icache_range)
 
 /*
  *     __flush_dcache_area(kaddr, size)
@@ -105,10 +105,10 @@ ENDPROC(invalidate_icache_range)
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
-ENTRY(__flush_dcache_area)
+SYM_FUNC_START_PI(__flush_dcache_area)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-ENDPIPROC(__flush_dcache_area)
+SYM_FUNC_END_PI(__flush_dcache_area)
 
 /*
  *     __clean_dcache_area_pou(kaddr, size)
@@ -119,14 +119,14 @@ ENDPIPROC(__flush_dcache_area)
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
-ENTRY(__clean_dcache_area_pou)
+SYM_FUNC_START(__clean_dcache_area_pou)
 alternative_if ARM64_HAS_CACHE_IDC
        dsb     ishst
        ret
 alternative_else_nop_endif
        dcache_by_line_op cvau, ish, x0, x1, x2, x3
        ret
-ENDPROC(__clean_dcache_area_pou)
+SYM_FUNC_END(__clean_dcache_area_pou)
 
 /*
  *     __inval_dcache_area(kaddr, size)
@@ -138,7 +138,8 @@ ENDPROC(__clean_dcache_area_pou)
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
-ENTRY(__inval_dcache_area)
+SYM_FUNC_START_LOCAL(__dma_inv_area)
+SYM_FUNC_START_PI(__inval_dcache_area)
        /* FALLTHROUGH */
 
 /*
@@ -146,7 +147,6 @@ ENTRY(__inval_dcache_area)
  *     - start   - virtual start address of region
  *     - size    - size in question
  */
-__dma_inv_area:
        add     x1, x1, x0
        dcache_line_size x2, x3
        sub     x3, x2, #1
@@ -165,8 +165,8 @@ __dma_inv_area:
        b.lo    2b
        dsb     sy
        ret
-ENDPIPROC(__inval_dcache_area)
-ENDPROC(__dma_inv_area)
+SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END(__dma_inv_area)
 
 /*
  *     __clean_dcache_area_poc(kaddr, size)
@@ -177,7 +177,8 @@ ENDPROC(__dma_inv_area)
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
-ENTRY(__clean_dcache_area_poc)
+SYM_FUNC_START_LOCAL(__dma_clean_area)
+SYM_FUNC_START_PI(__clean_dcache_area_poc)
        /* FALLTHROUGH */
 
 /*
@@ -185,11 +186,10 @@ ENTRY(__clean_dcache_area_poc)
  *     - start   - virtual start address of region
  *     - size    - size in question
  */
-__dma_clean_area:
        dcache_by_line_op cvac, sy, x0, x1, x2, x3
        ret
-ENDPIPROC(__clean_dcache_area_poc)
-ENDPROC(__dma_clean_area)
+SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END(__dma_clean_area)
 
 /*
  *     __clean_dcache_area_pop(kaddr, size)
@@ -200,13 +200,13 @@ ENDPROC(__dma_clean_area)
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
-ENTRY(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(__clean_dcache_area_pop)
        alternative_if_not ARM64_HAS_DCPOP
        b       __clean_dcache_area_poc
        alternative_else_nop_endif
        dcache_by_line_op cvap, sy, x0, x1, x2, x3
        ret
-ENDPIPROC(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(__clean_dcache_area_pop)
 
 /*
  *     __dma_flush_area(start, size)
@@ -216,10 +216,10 @@ ENDPIPROC(__clean_dcache_area_pop)
  *     - start   - virtual start address of region
  *     - size    - size in question
  */
-ENTRY(__dma_flush_area)
+SYM_FUNC_START_PI(__dma_flush_area)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-ENDPIPROC(__dma_flush_area)
+SYM_FUNC_END_PI(__dma_flush_area)
 
 /*
  *     __dma_map_area(start, size, dir)
@@ -227,11 +227,11 @@ ENDPIPROC(__dma_flush_area)
  *     - size  - size of region
  *     - dir   - DMA direction
  */
-ENTRY(__dma_map_area)
+SYM_FUNC_START_PI(__dma_map_area)
        cmp     w2, #DMA_FROM_DEVICE
        b.eq    __dma_inv_area
        b       __dma_clean_area
-ENDPIPROC(__dma_map_area)
+SYM_FUNC_END_PI(__dma_map_area)
 
 /*
  *     __dma_unmap_area(start, size, dir)
@@ -239,8 +239,8 @@ ENDPIPROC(__dma_map_area)
  *     - size  - size of region
  *     - dir   - DMA direction
  */
-ENTRY(__dma_unmap_area)
+SYM_FUNC_START_PI(__dma_unmap_area)
        cmp     w2, #DMA_TO_DEVICE
        b.ne    __dma_inv_area
        ret
-ENDPIPROC(__dma_unmap_area)
+SYM_FUNC_END_PI(__dma_unmap_area)
index b5e329f..8ef73e8 100644 (file)
@@ -29,15 +29,9 @@ static cpumask_t tlb_flush_pending;
 #define ASID_MASK              (~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION     (1UL << asid_bits)
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS         (ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid)         (((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx)          (((idx) << 1) & ~ASID_MASK)
-#else
-#define NUM_USER_ASIDS         (ASID_FIRST_VERSION)
+#define NUM_USER_ASIDS         ASID_FIRST_VERSION
 #define asid2idx(asid)         ((asid) & ~ASID_MASK)
 #define idx2asid(idx)          asid2idx(idx)
-#endif
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -77,13 +71,33 @@ void verify_cpu_asid_bits(void)
        }
 }
 
+static void set_kpti_asid_bits(void)
+{
+       unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
+       /*
+        * In the case of KPTI, kernel/user ASIDs are allocated in
+        * pairs; the bottom bit distinguishes the two: if it is
+        * set, then the ASID maps only userspace. Thus mark the
+        * even ones as reserved for the kernel.
+        */
+       memset(asid_map, 0xaa, len);
+}
+
+static void set_reserved_asid_bits(void)
+{
+       if (arm64_kernel_unmapped_at_el0())
+               set_kpti_asid_bits();
+       else
+               bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+}
+
 static void flush_context(void)
 {
        int i;
        u64 asid;
 
        /* Update the list of reserved ASIDs and the ASID bitmap. */
-       bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+       set_reserved_asid_bits();
 
        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
@@ -261,6 +275,14 @@ static int asids_init(void)
                panic("Failed to allocate bitmap for %lu ASIDs\n",
                      NUM_USER_ASIDS);
 
+       /*
+        * We cannot call set_reserved_asid_bits() here because CPU
+        * caps are not finalized yet, so it is safer to assume KPTI
+        * and reserve kernel ASIDs from the beginning.
+        */
+       if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+               set_kpti_asid_bits();
+
        pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
        return 0;
 }
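
The 0xaa pattern written by set_kpti_asid_bits() above pre-sets every odd-numbered bit of the allocation bitmap, so a find-first-zero allocator can only ever hand out one ASID of each pair, leaving its partner reserved. A standalone sketch of the effect (not kernel code; the bit-scan loop stands in for find_next_zero_bit()):

#include <stdio.h>
#include <string.h>

#define NUM_ASIDS 64

static unsigned char map[NUM_ASIDS / 8];

static int alloc_asid(void)
{
	int i;

	for (i = 0; i < NUM_ASIDS; i++) {
		if (!(map[i / 8] & (1u << (i % 8)))) {
			map[i / 8] |= 1u << (i % 8);
			return i;
		}
	}
	return -1;
}

int main(void)
{
	int i;

	memset(map, 0xaa, sizeof(map));	/* 0xaa = 0b10101010: bits 1, 3, 5, ... pre-set */
	for (i = 0; i < 4; i++)
		printf("allocated ASID %d\n", alloc_asid());	/* prints 0, 2, 4, 6 */
	return 0;
}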
index 9ce7bd9..250c490 100644 (file)
@@ -54,7 +54,7 @@ static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
 {
        unsigned long start = addr;
-       unsigned long size = PAGE_SIZE*numpages;
+       unsigned long size = PAGE_SIZE * numpages;
        unsigned long end = start + size;
        struct vm_struct *area;
        int i;
index a1e0592..aafed69 100644 (file)
 #define TCR_KASAN_FLAGS 0
 #endif
 
-#define MAIR(attr, mt) ((attr) << ((mt) * 8))
+/* Default MAIR_EL1 */
+#define MAIR_EL1_SET                                                   \
+       (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |      \
+        MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |        \
+        MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |            \
+        MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |              \
+        MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |                    \
+        MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
 
 #ifdef CONFIG_CPU_PM
 /**
@@ -50,7 +57,7 @@
  *
  * x0: virtual address of context pointer
  */
-ENTRY(cpu_do_suspend)
+SYM_FUNC_START(cpu_do_suspend)
        mrs     x2, tpidr_el0
        mrs     x3, tpidrro_el0
        mrs     x4, contextidr_el1
@@ -74,7 +81,7 @@ alternative_endif
        stp     x10, x11, [x0, #64]
        stp     x12, x13, [x0, #80]
        ret
-ENDPROC(cpu_do_suspend)
+SYM_FUNC_END(cpu_do_suspend)
 
 /**
  * cpu_do_resume - restore CPU register context
@@ -82,7 +89,7 @@ ENDPROC(cpu_do_suspend)
  * x0: Address of context pointer
  */
        .pushsection ".idmap.text", "awx"
-ENTRY(cpu_do_resume)
+SYM_FUNC_START(cpu_do_resume)
        ldp     x2, x3, [x0]
        ldp     x4, x5, [x0, #16]
        ldp     x6, x8, [x0, #32]
@@ -131,7 +138,7 @@ alternative_else_nop_endif
 
        isb
        ret
-ENDPROC(cpu_do_resume)
+SYM_FUNC_END(cpu_do_resume)
        .popsection
 #endif
 
@@ -142,7 +149,7 @@ ENDPROC(cpu_do_resume)
  *
  *     - pgd_phys - physical address of new TTB
  */
-ENTRY(cpu_do_switch_mm)
+SYM_FUNC_START(cpu_do_switch_mm)
        mrs     x2, ttbr1_el1
        mmid    x1, x1                          // get mm->context.id
        phys_to_ttbr x3, x0
@@ -161,7 +168,7 @@ alternative_else_nop_endif
        msr     ttbr0_el1, x3                   // now update TTBR0
        isb
        b       post_ttbr_update_workaround     // Back to C code...
-ENDPROC(cpu_do_switch_mm)
+SYM_FUNC_END(cpu_do_switch_mm)
 
        .pushsection ".idmap.text", "awx"
 
@@ -182,7 +189,7 @@ ENDPROC(cpu_do_switch_mm)
  * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
  * called by anything else. It can only be executed from a TTBR0 mapping.
  */
-ENTRY(idmap_cpu_replace_ttbr1)
+SYM_FUNC_START(idmap_cpu_replace_ttbr1)
        save_and_disable_daif flags=x2
 
        __idmap_cpu_set_reserved_ttbr1 x1, x3
@@ -194,7 +201,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
        restore_daif x2
 
        ret
-ENDPROC(idmap_cpu_replace_ttbr1)
+SYM_FUNC_END(idmap_cpu_replace_ttbr1)
        .popsection
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
@@ -222,7 +229,7 @@ ENDPROC(idmap_cpu_replace_ttbr1)
  */
 __idmap_kpti_flag:
        .long   1
-ENTRY(idmap_kpti_install_ng_mappings)
+SYM_FUNC_START(idmap_kpti_install_ng_mappings)
        cpu             .req    w0
        num_cpus        .req    w1
        swapper_pa      .req    x2
@@ -250,15 +257,15 @@ ENTRY(idmap_kpti_install_ng_mappings)
        /* We're the boot CPU. Wait for the others to catch up */
        sevl
 1:     wfe
-       ldaxr   w18, [flag_ptr]
-       eor     w18, w18, num_cpus
-       cbnz    w18, 1b
+       ldaxr   w17, [flag_ptr]
+       eor     w17, w17, num_cpus
+       cbnz    w17, 1b
 
        /* We need to walk swapper, so turn off the MMU. */
        pre_disable_mmu_workaround
-       mrs     x18, sctlr_el1
-       bic     x18, x18, #SCTLR_ELx_M
-       msr     sctlr_el1, x18
+       mrs     x17, sctlr_el1
+       bic     x17, x17, #SCTLR_ELx_M
+       msr     sctlr_el1, x17
        isb
 
        /* Everybody is enjoying the idmap, so we can rewrite swapper. */
@@ -281,9 +288,9 @@ skip_pgd:
        isb
 
        /* We're done: fire up the MMU again */
-       mrs     x18, sctlr_el1
-       orr     x18, x18, #SCTLR_ELx_M
-       msr     sctlr_el1, x18
+       mrs     x17, sctlr_el1
+       orr     x17, x17, #SCTLR_ELx_M
+       msr     sctlr_el1, x17
        isb
 
        /*
@@ -353,47 +360,48 @@ skip_pte:
        b.ne    do_pte
        b       next_pmd
 
+       .unreq  cpu
+       .unreq  num_cpus
+       .unreq  swapper_pa
+       .unreq  cur_pgdp
+       .unreq  end_pgdp
+       .unreq  pgd
+       .unreq  cur_pudp
+       .unreq  end_pudp
+       .unreq  pud
+       .unreq  cur_pmdp
+       .unreq  end_pmdp
+       .unreq  pmd
+       .unreq  cur_ptep
+       .unreq  end_ptep
+       .unreq  pte
+
        /* Secondary CPUs end up here */
 __idmap_kpti_secondary:
        /* Uninstall swapper before surgery begins */
-       __idmap_cpu_set_reserved_ttbr1 x18, x17
+       __idmap_cpu_set_reserved_ttbr1 x16, x17
 
        /* Increment the flag to let the boot CPU know we're ready */
-1:     ldxr    w18, [flag_ptr]
-       add     w18, w18, #1
-       stxr    w17, w18, [flag_ptr]
+1:     ldxr    w16, [flag_ptr]
+       add     w16, w16, #1
+       stxr    w17, w16, [flag_ptr]
        cbnz    w17, 1b
 
        /* Wait for the boot CPU to finish messing around with swapper */
        sevl
 1:     wfe
-       ldxr    w18, [flag_ptr]
-       cbnz    w18, 1b
+       ldxr    w16, [flag_ptr]
+       cbnz    w16, 1b
 
        /* All done, act like nothing happened */
-       offset_ttbr1 swapper_ttb, x18
+       offset_ttbr1 swapper_ttb, x16
        msr     ttbr1_el1, swapper_ttb
        isb
        ret
 
-       .unreq  cpu
-       .unreq  num_cpus
-       .unreq  swapper_pa
        .unreq  swapper_ttb
        .unreq  flag_ptr
-       .unreq  cur_pgdp
-       .unreq  end_pgdp
-       .unreq  pgd
-       .unreq  cur_pudp
-       .unreq  end_pudp
-       .unreq  pud
-       .unreq  cur_pmdp
-       .unreq  end_pmdp
-       .unreq  pmd
-       .unreq  cur_ptep
-       .unreq  end_ptep
-       .unreq  pte
-ENDPROC(idmap_kpti_install_ng_mappings)
+SYM_FUNC_END(idmap_kpti_install_ng_mappings)
        .popsection
 #endif
 
@@ -404,7 +412,7 @@ ENDPROC(idmap_kpti_install_ng_mappings)
  *     value of the SCTLR_EL1 register.
  */
        .pushsection ".idmap.text", "awx"
-ENTRY(__cpu_setup)
+SYM_FUNC_START(__cpu_setup)
        tlbi    vmalle1                         // Invalidate local TLB
        dsb     nsh
 
@@ -416,23 +424,9 @@ ENTRY(__cpu_setup)
        enable_dbg                              // since this is per-cpu
        reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
        /*
-        * Memory region attributes for LPAE:
-        *
-        *   n = AttrIndx[2:0]
-        *                      n       MAIR
-        *   DEVICE_nGnRnE      000     00000000
-        *   DEVICE_nGnRE       001     00000100
-        *   DEVICE_GRE         010     00001100
-        *   NORMAL_NC          011     01000100
-        *   NORMAL             100     11111111
-        *   NORMAL_WT          101     10111011
+        * Memory region attributes
         */
-       ldr     x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
-                    MAIR(0x04, MT_DEVICE_nGnRE) | \
-                    MAIR(0x0c, MT_DEVICE_GRE) | \
-                    MAIR(0x44, MT_NORMAL_NC) | \
-                    MAIR(0xff, MT_NORMAL) | \
-                    MAIR(0xbb, MT_NORMAL_WT)
+       mov_q   x5, MAIR_EL1_SET
        msr     mair_el1, x5
        /*
         * Prepare SCTLR
@@ -475,4 +469,4 @@ ENTRY(__cpu_setup)
 #endif /* CONFIG_ARM64_HW_AFDBM */
        msr     tcr_el1, x10
        ret                                     // return to head.S
-ENDPROC(__cpu_setup)
+SYM_FUNC_END(__cpu_setup)
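
The mov_q MAIR_EL1_SET above replaces the open-coded ldr literal; both pack one 8-bit attribute per byte of MAIR_EL1, indexed by memory type. A quick userspace sketch of the packing, using the attribute values from the deleted comment (not kernel code):

#include <stdint.h>
#include <stdio.h>

#define MAIR(attr, mt)	((uint64_t)(attr) << ((mt) * 8))

/* memory type indices, matching the table in the deleted comment */
enum { MT_DEVICE_nGnRnE, MT_DEVICE_nGnRE, MT_DEVICE_GRE,
       MT_NORMAL_NC, MT_NORMAL, MT_NORMAL_WT };

int main(void)
{
	uint64_t mair = MAIR(0x00, MT_DEVICE_nGnRnE) |
			MAIR(0x04, MT_DEVICE_nGnRE) |
			MAIR(0x0c, MT_DEVICE_GRE) |
			MAIR(0x44, MT_NORMAL_NC) |
			MAIR(0xff, MT_NORMAL) |
			MAIR(0xbb, MT_NORMAL_WT);

	printf("MAIR_EL1 = %#018llx\n", (unsigned long long)mair);	/* 0x0000bbff440c0400 */
	return 0;
}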
index c5f05c4..5b09aca 100644 (file)
 #define XEN_IMM 0xEA1
 
 #define HYPERCALL_SIMPLE(hypercall)            \
-ENTRY(HYPERVISOR_##hypercall)                  \
+SYM_FUNC_START(HYPERVISOR_##hypercall)         \
        mov x16, #__HYPERVISOR_##hypercall;     \
        hvc XEN_IMM;                            \
        ret;                                    \
-ENDPROC(HYPERVISOR_##hypercall)
+SYM_FUNC_END(HYPERVISOR_##hypercall)
 
 #define HYPERCALL0 HYPERCALL_SIMPLE
 #define HYPERCALL1 HYPERCALL_SIMPLE
@@ -86,7 +86,7 @@ HYPERCALL2(multicall);
 HYPERCALL2(vm_assist);
 HYPERCALL3(dm_op);
 
-ENTRY(privcmd_call)
+SYM_FUNC_START(privcmd_call)
        mov x16, x0
        mov x0, x1
        mov x1, x2
@@ -109,4 +109,4 @@ ENTRY(privcmd_call)
         */
        uaccess_ttbr0_disable x6, x7
        ret
-ENDPROC(privcmd_call);
+SYM_FUNC_END(privcmd_call);
index 6663f17..6ad6cda 100644 (file)
@@ -14,6 +14,7 @@ config M68K
        select HAVE_AOUT if MMU
        select HAVE_ASM_MODVERSIONS
        select HAVE_DEBUG_BUGVERBOSE
+       select HAVE_COPY_THREAD_TLS
        select GENERIC_IRQ_SHOW
        select GENERIC_ATOMIC64
        select HAVE_UID16
index 619d30d..e1134c3 100644 (file)
@@ -562,6 +562,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -574,7 +575,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -612,6 +613,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -620,6 +624,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -651,4 +656,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index caa0558..484cb16 100644 (file)
@@ -518,6 +518,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -530,7 +531,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -568,6 +569,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -576,6 +580,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -607,4 +612,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 2551c7e..eb6a46b 100644 (file)
@@ -540,6 +540,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -552,7 +553,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -590,6 +591,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -598,6 +602,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -629,4 +634,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 4ffc1e5..bee9263 100644 (file)
@@ -511,6 +511,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -523,7 +524,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -561,6 +562,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -569,6 +573,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -600,4 +605,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 806da3d..c8847a8 100644 (file)
@@ -520,6 +520,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -532,7 +533,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -570,6 +571,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -578,6 +582,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -609,4 +614,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 250da20..303ffaf 100644 (file)
@@ -542,6 +542,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -554,7 +555,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -592,6 +593,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -600,6 +604,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -631,4 +636,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index b764a03..89a7042 100644 (file)
@@ -628,6 +628,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -640,7 +641,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -678,6 +679,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -686,6 +690,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -717,4 +722,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 7800d3a..f62c1f4 100644 (file)
@@ -510,6 +510,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -522,7 +523,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -560,6 +561,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -568,6 +572,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -599,4 +604,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index c32dc2d..58dcad2 100644 (file)
@@ -511,6 +511,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -523,7 +524,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -561,6 +562,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -569,6 +573,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -600,4 +605,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index bf0a65c..5d3c28d 100644 (file)
@@ -529,6 +529,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -541,7 +542,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -579,6 +580,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -587,6 +591,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -618,4 +623,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 5f3cfa2..5ef9e17 100644 (file)
@@ -513,6 +513,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -525,7 +526,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -563,6 +564,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
index 58354d2..22e1acc 100644 (file)
@@ -512,6 +512,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -524,7 +525,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -562,6 +563,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -570,6 +574,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -601,4 +606,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 2e0047c..4ae5241 100644 (file)
@@ -30,5 +30,6 @@
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE3
 
 #endif /* _ASM_M68K_UNISTD_H_ */
index 97cd3ea..9dd76fb 100644 (file)
@@ -69,6 +69,13 @@ ENTRY(__sys_vfork)
        lea     %sp@(24),%sp
        rts
 
+ENTRY(__sys_clone3)
+       SAVE_SWITCH_STACK
+       pea     %sp@(SWITCH_STACK_SIZE)
+       jbsr    m68k_clone3
+       lea     %sp@(28),%sp
+       rts
+
 ENTRY(sys_sigreturn)
        SAVE_SWITCH_STACK
        movel   %sp,%sp@-                 | switch_stack pointer
index 4e77a06..8f0d914 100644 (file)
@@ -30,8 +30,9 @@
 #include <linux/init_task.h>
 #include <linux/mqueue.h>
 #include <linux/rcupdate.h>
-
+#include <linux/syscalls.h>
 #include <linux/uaccess.h>
+
 #include <asm/traps.h>
 #include <asm/machdep.h>
 #include <asm/setup.h>
@@ -107,20 +108,43 @@ void flush_thread(void)
  * on top of pt_regs, which means that sys_clone() arguments would be
  * buried.  We could, of course, copy them, but it's too costly for no
  * good reason - generic clone() would have to copy them *again* for
- * do_fork() anyway.  So in this case it's actually better to pass pt_regs *
- * and extract arguments for do_fork() from there.  Eventually we might
- * go for calling do_fork() directly from the wrapper, but only after we
- * are finished with do_fork() prototype conversion.
+ * _do_fork() anyway.  So in this case it's actually better to pass pt_regs *
+ * and extract arguments for _do_fork() from there.  Eventually we might
+ * go for calling _do_fork() directly from the wrapper, but only after we
+ * are finished with _do_fork() prototype conversion.
  */
 asmlinkage int m68k_clone(struct pt_regs *regs)
 {
        /* regs will be equal to current_pt_regs() */
-       return do_fork(regs->d1, regs->d2, 0,
-                      (int __user *)regs->d3, (int __user *)regs->d4);
+       struct kernel_clone_args args = {
+               .flags          = regs->d1 & ~CSIGNAL,
+               .pidfd          = (int __user *)regs->d3,
+               .child_tid      = (int __user *)regs->d4,
+               .parent_tid     = (int __user *)regs->d3,
+               .exit_signal    = regs->d1 & CSIGNAL,
+               .stack          = regs->d2,
+               .tls            = regs->d5,
+       };
+
+       if (!legacy_clone_args_valid(&args))
+               return -EINVAL;
+
+       return _do_fork(&args);
+}
+
+/*
+ * Because extra registers are saved on the stack after the sys_clone3()
+ * arguments, this C wrapper extracts them from pt_regs * and then calls the
+ * generic sys_clone3() implementation.
+ */
+asmlinkage int m68k_clone3(struct pt_regs *regs)
+{
+       return sys_clone3((struct clone_args __user *)regs->d1, regs->d2);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-                unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+                   unsigned long arg, struct task_struct *p,
+                   unsigned long tls)
 {
        struct fork_frame {
                struct switch_stack sw;
@@ -155,7 +179,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        p->thread.usp = usp ?: rdusp();
 
        if (clone_flags & CLONE_SETTLS)
-               task_thread_info(p)->tp_value = frame->regs.d5;
+               task_thread_info(p)->tp_value = tls;
 
 #ifdef CONFIG_FPU
        if (!FPU_IS_EMU) {
index a88a285..a00a5d0 100644 (file)
 432    common  fsmount                         sys_fsmount
 433    common  fspick                          sys_fspick
 434    common  pidfd_open                      sys_pidfd_open
-# 435 reserved for clone3
+435    common  clone3                          __sys_clone3
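
With entry 435 wired up above, m68k gains clone3(). For illustration, a minimal, architecture-independent userspace sketch invoking it through syscall(2); this assumes kernel headers new enough to provide struct clone_args and __NR_clone3, and is not m68k-specific:

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args */
#include <sys/syscall.h>	/* __NR_clone3 */
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct clone_args args;
	long pid;

	memset(&args, 0, sizeof(args));		/* all defaults: behaves like fork() */
	args.exit_signal = SIGCHLD;

	pid = syscall(__NR_clone3, &args, sizeof(args));
	if (pid < 0) {
		perror("clone3");
		return 1;
	}
	if (pid == 0) {				/* child */
		printf("child %d\n", (int)getpid());
		_exit(0);
	}
	waitpid((pid_t)pid, NULL, 0);
	return 0;
}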
index 1ec34e1..e2a4121 100644 (file)
@@ -455,11 +455,7 @@ config PPC_TRANSACTIONAL_MEM
 config PPC_UV
        bool "Ultravisor support"
        depends on KVM_BOOK3S_HV_POSSIBLE
-       select ZONE_DEVICE
-       select DEV_PAGEMAP_OPS
-       select DEVICE_PRIVATE
-       select MEMORY_HOTPLUG
-       select MEMORY_HOTREMOVE
+       depends on DEVICE_PRIVATE
        default n
        help
          This option paravirtualizes the kernel to run in POWER platforms that
index e1a961f..baa0c50 100644 (file)
@@ -63,6 +63,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy0: ethernet-phy@0 {
                        reg = <0x0>;
index c288f3c..9309560 100644 (file)
@@ -60,6 +60,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy6: ethernet-phy@0 {
                        reg = <0x0>;
index 94f3e71..ff4bd38 100644 (file)
@@ -63,6 +63,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy1: ethernet-phy@0 {
                        reg = <0x0>;
index 94a7698..1fa38ed 100644 (file)
@@ -60,6 +60,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy7: ethernet-phy@0 {
                        reg = <0x0>;
index b5ff5f7..a8cc978 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy0: ethernet-phy@0 {
                        reg = <0x0>;
index ee44182..8b8bd70 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy1: ethernet-phy@0 {
                        reg = <0x0>;
index f05f0d7..619c880 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe5000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy2: ethernet-phy@0 {
                        reg = <0x0>;
index a9114ec..d7ebb73 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe7000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy3: ethernet-phy@0 {
                        reg = <0x0>;
index 44dd00a..b151d69 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe9000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy4: ethernet-phy@0 {
                        reg = <0x0>;
index 5b1b84b..adc0ae0 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xeb000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy5: ethernet-phy@0 {
                        reg = <0x0>;
index 0e1daae..435047e 100644 (file)
@@ -60,6 +60,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy14: ethernet-phy@0 {
                        reg = <0x0>;
index 68c5ef7..c098657 100644 (file)
@@ -60,6 +60,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy15: ethernet-phy@0 {
                        reg = <0x0>;
index 605363c..9d06824 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy8: ethernet-phy@0 {
                        reg = <0x0>;
index 1955dfa..70e9477 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy9: ethernet-phy@0 {
                        reg = <0x0>;
index 2c14764..ad96e65 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe5000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy10: ethernet-phy@0 {
                        reg = <0x0>;
index b8b541f..034bc4b 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe7000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy11: ethernet-phy@0 {
                        reg = <0x0>;
index 4b2cfdd..93ca23d 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe9000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy12: ethernet-phy@0 {
                        reg = <0x0>;
index 0a52ddf..23b3117 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xeb000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy13: ethernet-phy@0 {
                        reg = <0x0>;
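
Downstream, the driver only needs to latch this boolean property at probe time; a sketch of how an MDIO driver might do so (the struct and function names here are hypothetical):

    #include <linux/of.h>

    struct xgmac_mdio_priv {
            bool has_a011043;       /* A-011043: ignore MDIO read errors */
    };

    static void xgmac_mdio_parse_quirks(struct xgmac_mdio_priv *priv,
                                        struct device_node *np)
    {
            /* Boolean property: mere presence enables the workaround. */
            priv->has_a011043 = of_property_read_bool(np,
                                                      "fsl,erratum-a011043");
    }
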
index 15b7500..3fa1b96 100644 (file)
@@ -600,8 +600,11 @@ extern void slb_set_size(u16 size);
  *
  */
 #define MAX_USER_CONTEXT       ((ASM_CONST(1) << CONTEXT_BITS) - 2)
+
+// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
 #define MIN_USER_CONTEXT       (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
-                                MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
+                                MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
+
 /*
  * For platforms that support on 65bit VA we limit the context bits
  */
index f2dfcd5..33aee74 100644 (file)
@@ -39,6 +39,7 @@
 
 #define XIVE_ESB_VAL_P         0x2
 #define XIVE_ESB_VAL_Q         0x1
+#define XIVE_ESB_INVALID       0xFF
 
 /*
  * Thread Management (aka "TM") registers
index f5fadbd..9651ca0 100644 (file)
@@ -972,12 +972,21 @@ static int xive_get_irqchip_state(struct irq_data *data,
                                  enum irqchip_irq_state which, bool *state)
 {
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
+       u8 pq;
 
        switch (which) {
        case IRQCHIP_STATE_ACTIVE:
-               *state = !xd->stale_p &&
-                        (xd->saved_p ||
-                         !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
+               pq = xive_esb_read(xd, XIVE_ESB_GET);
+
+               /*
+                * The esb value being all 1's means we couldn't get
+                * the PQ state of the interrupt through mmio. It may
+                * happen, for example when querying a PHB interrupt
+                * while the PHB is in an error state. We consider the
+                * interrupt to be inactive in that case.
+                */
+               *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
+                       (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
                return 0;
        default:
                return -EINVAL;
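
The same validity test could be factored into a tiny helper; a sketch (this helper is not part of the patch):

    /* All 1's from an ESB load means the MMIO access itself failed. */
    static bool xive_esb_pq_valid(u8 pq)
    {
            return pq != XIVE_ESB_INVALID;
    }
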
index 9e7adcd..e6da1ce 100644 (file)
 
 #if IS_ENABLED(CONFIG_INTEL_PMC_IPC)
 
-int intel_pmc_ipc_simple_command(int cmd, int sub);
-int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
-               u32 *out, u32 outlen, u32 dptr, u32 sptr);
 int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
                u32 *out, u32 outlen);
 int intel_pmc_s0ix_counter_read(u64 *data);
-int intel_pmc_gcr_read(u32 offset, u32 *data);
 int intel_pmc_gcr_read64(u32 offset, u64 *data);
-int intel_pmc_gcr_write(u32 offset, u32 data);
-int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val);
 
 #else
 
-static inline int intel_pmc_ipc_simple_command(int cmd, int sub)
-{
-       return -EINVAL;
-}
-
-static inline int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
-               u32 *out, u32 outlen, u32 dptr, u32 sptr)
-{
-       return -EINVAL;
-}
-
 static inline int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
                u32 *out, u32 outlen)
 {
@@ -66,26 +49,11 @@ static inline int intel_pmc_s0ix_counter_read(u64 *data)
        return -EINVAL;
 }
 
-static inline int intel_pmc_gcr_read(u32 offset, u32 *data)
-{
-       return -EINVAL;
-}
-
 static inline int intel_pmc_gcr_read64(u32 offset, u64 *data)
 {
        return -EINVAL;
 }
 
-static inline int intel_pmc_gcr_write(u32 offset, u32 data)
-{
-       return -EINVAL;
-}
-
-static inline int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
-{
-       return -EINVAL;
-}
-
 #endif /*CONFIG_INTEL_PMC_IPC*/
 
 #endif
index 4a8c6e8..2a1442b 100644 (file)
 /* Read single register */
 int intel_scu_ipc_ioread8(u16 addr, u8 *data);
 
-/* Read two sequential registers */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data);
-
-/* Read four sequential registers */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data);
-
 /* Read a vector */
 int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
 
 /* Write single register */
 int intel_scu_ipc_iowrite8(u16 addr, u8 data);
 
-/* Write two sequential registers */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data);
-
-/* Write four sequential registers */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data);
-
 /* Write a vector */
 int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
 
@@ -50,14 +38,6 @@ int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
 int intel_scu_ipc_simple_command(int cmd, int sub);
 int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
                          u32 *out, int outlen);
-int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
-                             u32 *out, int outlen, u32 dptr, u32 sptr);
-
-/* I2C control api */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
-
-/* Update FW version */
-int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
 
 extern struct blocking_notifier_head intel_scu_notifier;
 
index 2143948..2f77e31 100644 (file)
@@ -40,13 +40,10 @@ struct telemetry_evtmap {
 struct telemetry_unit_config {
        struct telemetry_evtmap *telem_evts;
        void __iomem *regmap;
-       u32 ssram_base_addr;
        u8 ssram_evts_used;
        u8 curr_period;
        u8 max_period;
        u8 min_period;
-       u32 ssram_size;
-
 };
 
 struct telemetry_plt_config {
index 9997521..e1aa17a 100644 (file)
@@ -399,4 +399,40 @@ extern bool arch_memremap_can_ram_remap(resource_size_t offset,
 extern bool phys_mem_access_encrypted(unsigned long phys_addr,
                                      unsigned long size);
 
+/**
+ * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
+ * @__dst: destination, in MMIO space (must be 512-bit aligned)
+ * @src: source
+ * @count: number of 512-bit quantities to submit
+ *
+ * Submit data from kernel space to MMIO space, in units of 512 bits at a
+ * time.  Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ *
+ * Warning: Do not use this helper unless your driver has checked that the CPU
+ * instruction is supported on the platform.
+ */
+static inline void iosubmit_cmds512(void __iomem *__dst, const void *src,
+                                   size_t count)
+{
+       /*
+        * Note that this isn't an "on-stack copy", just definition of "dst"
+        * as a pointer to 64-bytes of stuff that is going to be overwritten.
+        * In the MOVDIR64B case that may be needed as you can use the
+        * MOVDIR64B instruction to copy arbitrary memory around. This trick
+        * lets the compiler know how much gets clobbered.
+        */
+       volatile struct { char _[64]; } *dst = __dst;
+       const u8 *from = src;
+       const u8 *end = from + count * 64;
+
+       while (from < end) {
+               /* MOVDIR64B [rdx], rax */
+               asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+                            : "=m" (dst)
+                            : "d" (from), "a" (dst));
+               from += 64;
+       }
+}
+
 #endif /* _ASM_X86_IO_H */
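
Per the warning in the kerneldoc, callers must gate this on CPU support first; a sketch of a guarded submission path (the mydrv_* names are hypothetical, the feature flag check is the standard one):

    #include <asm/cpufeature.h>
    #include <asm/io.h>

    static int mydrv_submit_desc(void __iomem *portal, const void *desc)
    {
            /* iosubmit_cmds512() emits MOVDIR64B unconditionally. */
            if (!boot_cpu_has(X86_FEATURE_MOVDIR64B))
                    return -ENODEV;

            iosubmit_cmds512(portal, desc, 1);      /* one 64-byte command */
            return 0;
    }
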
index dc2d4b2..4359b95 100644 (file)
@@ -144,7 +144,7 @@ struct mce_log_buffer {
 
 enum mce_notifier_prios {
        MCE_PRIO_FIRST          = INT_MAX,
-       MCE_PRIO_SRAO           = INT_MAX - 1,
+       MCE_PRIO_UC             = INT_MAX - 1,
        MCE_PRIO_EXTLOG         = INT_MAX - 2,
        MCE_PRIO_NFIT           = INT_MAX - 3,
        MCE_PRIO_EDAC           = INT_MAX - 4,
@@ -290,6 +290,7 @@ extern void apei_mce_report_mem_error(int corrected,
 /* These may be used by multiple smca_hwid_mcatypes */
 enum smca_bank_types {
        SMCA_LS = 0,    /* Load Store */
+       SMCA_LS_V2,     /* Load Store */
        SMCA_IF,        /* Instruction Fetch */
        SMCA_L2_CACHE,  /* L2 Cache */
        SMCA_DE,        /* Decoder Unit */
index 2094928..6685e12 100644 (file)
@@ -53,6 +53,6 @@ static inline void __init load_ucode_amd_bsp(unsigned int family) {}
 static inline void load_ucode_amd_ap(unsigned int family) {}
 static inline int __init
 save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
-void reload_ucode_amd(void) {}
+static inline void reload_ucode_amd(void) {}
 #endif
 #endif /* _ASM_X86_MICROCODE_AMD_H */
index 251c795..69aed0e 100644 (file)
@@ -22,6 +22,7 @@
 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
+#define PCI_DEVICE_ID_AMD_19H_DF_F4    0x1654
 
 /* Protect the PCI config register pairs used for SMN and DF indirect access. */
 static DEFINE_MUTEX(smn_mutex);
@@ -52,6 +53,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
        {}
 };
 EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@@ -66,6 +68,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
        {}
 };
index d6cf5c1..b3a50d9 100644 (file)
@@ -78,6 +78,7 @@ struct smca_bank_name {
 
 static struct smca_bank_name smca_names[] = {
        [SMCA_LS]       = { "load_store",       "Load Store Unit" },
+       [SMCA_LS_V2]    = { "load_store",       "Load Store Unit" },
        [SMCA_IF]       = { "insn_fetch",       "Instruction Fetch Unit" },
        [SMCA_L2_CACHE] = { "l2_cache",         "L2 Cache" },
        [SMCA_DE]       = { "decode_unit",      "Decode Unit" },
@@ -138,6 +139,7 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
 
        /* ZN Core (HWID=0xB0) MCA types */
        { SMCA_LS,       HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF },
+       { SMCA_LS_V2,    HWID_MCATYPE(0xB0, 0x10), 0xFFFFFF },
        { SMCA_IF,       HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
        { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
        { SMCA_DE,       HWID_MCATYPE(0xB0, 0x3), 0x1FF },
index 2e2a421..2c4f949 100644 (file)
@@ -53,8 +53,6 @@
 
 #include "internal.h"
 
-static DEFINE_MUTEX(mce_log_mutex);
-
 /* sysfs synchronization */
 static DEFINE_MUTEX(mce_sysfs_mutex);
 
@@ -156,19 +154,10 @@ void mce_log(struct mce *m)
        if (!mce_gen_pool_add(m))
                irq_work_queue(&mce_irq_work);
 }
-
-void mce_inject_log(struct mce *m)
-{
-       mutex_lock(&mce_log_mutex);
-       mce_log(m);
-       mutex_unlock(&mce_log_mutex);
-}
-EXPORT_SYMBOL_GPL(mce_inject_log);
-
-static struct notifier_block mce_srao_nb;
+EXPORT_SYMBOL_GPL(mce_log);
 
 /*
- * We run the default notifier if we have only the SRAO, the first and the
+ * We run the default notifier if we have only the UC, the first and the
  * default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS
  * notifiers registered on the chain.
  */
@@ -594,26 +583,29 @@ static struct notifier_block first_nb = {
        .priority       = MCE_PRIO_FIRST,
 };
 
-static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
-                               void *data)
+static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
+                             void *data)
 {
        struct mce *mce = (struct mce *)data;
        unsigned long pfn;
 
-       if (!mce)
+       if (!mce || !mce_usable_address(mce))
                return NOTIFY_DONE;
 
-       if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
-               pfn = mce->addr >> PAGE_SHIFT;
-               if (!memory_failure(pfn, 0))
-                       set_mce_nospec(pfn);
-       }
+       if (mce->severity != MCE_AO_SEVERITY &&
+           mce->severity != MCE_DEFERRED_SEVERITY)
+               return NOTIFY_DONE;
+
+       pfn = mce->addr >> PAGE_SHIFT;
+       if (!memory_failure(pfn, 0))
+               set_mce_nospec(pfn);
 
        return NOTIFY_OK;
 }
-static struct notifier_block mce_srao_nb = {
-       .notifier_call  = srao_decode_notifier,
-       .priority       = MCE_PRIO_SRAO,
+
+static struct notifier_block mce_uc_nb = {
+       .notifier_call  = uc_decode_notifier,
+       .priority       = MCE_PRIO_UC,
 };
 
 static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
@@ -763,26 +755,22 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 log_it:
                error_seen = true;
 
-               mce_read_aux(&m, i);
+               if (flags & MCP_DONTLOG)
+                       goto clear_it;
 
+               mce_read_aux(&m, i);
                m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
-
                /*
                 * Don't get the IP here because it's unlikely to
                 * have anything to do with the actual error location.
                 */
-               if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
-                       mce_log(&m);
-               else if (mce_usable_address(&m)) {
-                       /*
-                        * Although we skipped logging this, we still want
-                        * to take action. Add to the pool so the registered
-                        * notifiers will see it.
-                        */
-                       if (!mce_gen_pool_add(&m))
-                               mce_schedule_work();
-               }
 
+               if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
+                       goto clear_it;
+
+               mce_log(&m);
+
+clear_it:
                /*
                 * Clear state for this bank.
                 */
@@ -807,7 +795,7 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                          struct pt_regs *regs)
 {
-       char *tmp;
+       char *tmp = *msg;
        int i;
 
        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
@@ -1232,8 +1220,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
        DECLARE_BITMAP(toclear, MAX_NR_BANKS);
        struct mca_config *cfg = &mca_cfg;
        int cpu = smp_processor_id();
-       char *msg = "Unknown";
        struct mce m, *final;
+       char *msg = NULL;
        int worst = 0;
 
        /*
@@ -1365,7 +1353,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                ist_end_non_atomic();
        } else {
                if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
-                       mce_panic("Failed kernel mode recovery", &m, NULL);
+                       mce_panic("Failed kernel mode recovery", &m, msg);
        }
 
 out_ist:
@@ -2041,7 +2029,7 @@ int __init mcheck_init(void)
 {
        mcheck_intel_therm_init();
        mce_register_decode_chain(&first_nb);
-       mce_register_decode_chain(&mce_srao_nb);
+       mce_register_decode_chain(&mce_uc_nb);
        mce_register_decode_chain(&mce_default_nb);
        mcheck_vendor_init_severity();
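
Consumers of the decode chain are unaffected by the SRAO-to-UC rename apart from the priority constants; a minimal sketch of a chain subscriber (all mydrv_* names hypothetical):

    static int mydrv_mce_notify(struct notifier_block *nb,
                                unsigned long val, void *data)
    {
            struct mce *m = data;

            if (!m)
                    return NOTIFY_DONE;

            /* Runs after first_nb and the UC handler, by priority. */
            pr_info("mydrv: bank %d, status %#llx\n", m->bank, m->status);
            return NOTIFY_OK;
    }

    static struct notifier_block mydrv_mce_nb = {
            .notifier_call  = mydrv_mce_notify,
            .priority       = MCE_PRIO_EDAC - 1,
    };

    /* mce_register_decode_chain(&mydrv_mce_nb) at init,
     * mce_unregister_decode_chain(&mydrv_mce_nb) at exit. */
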
 
index 1f30117..3413b41 100644 (file)
@@ -494,7 +494,7 @@ static void do_inject(void)
                i_mce.status |= MCI_STATUS_SYNDV;
 
        if (inj_type == SW_INJ) {
-               mce_inject_log(&i_mce);
+               mce_log(&i_mce);
                return;
        }
 
index 842b273..b785c0d 100644 (file)
@@ -84,8 +84,6 @@ static inline int apei_clear_mce(u64 record_id)
 }
 #endif
 
-void mce_inject_log(struct mce *m);
-
 /*
  * We consider records to be equivalent if bank+status+addr+misc all match.
  * This is only used when the system is going down because of a fatal error
index 6c3e1c9..58b4ee3 100644 (file)
@@ -235,7 +235,7 @@ static void get_therm_status(int level, bool *proc_hot, u8 *temp)
        *temp = (msr_val >> 16) & 0x7F;
 }
 
-static void throttle_active_work(struct work_struct *work)
+static void __maybe_unused throttle_active_work(struct work_struct *work)
 {
        struct _thermal_state *state = container_of(to_delayed_work(work),
                                                struct _thermal_state, therm_work);
index 1d20c9c..564fae7 100644 (file)
@@ -321,6 +321,24 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
        const char *dname;
        int err;
 
+       /*
+        * Partitions are not supported on zoned block devices that are used as
+        * such.
+        */
+       switch (disk->queue->limits.zoned) {
+       case BLK_ZONED_HM:
+               pr_warn("%s: partitions not supported on host managed zoned block device\n",
+                       disk->disk_name);
+               return ERR_PTR(-ENXIO);
+       case BLK_ZONED_HA:
+               pr_info("%s: disabling host aware zoned block device support due to partitions\n",
+                       disk->disk_name);
+               disk->queue->limits.zoned = BLK_ZONED_NONE;
+               break;
+       case BLK_ZONED_NONE:
+               break;
+       }
+
        err = disk_expand_part_tbl(disk, partno);
        if (err)
                return ERR_PTR(err);
@@ -501,7 +519,7 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
 
        part = add_partition(disk, p, from, size, state->parts[p].flags,
                             &state->parts[p].info);
-       if (IS_ERR(part)) {
+       if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
                printk(KERN_ERR " %s: p%d could not be added: %ld\n",
                       disk->disk_name, p, -PTR_ERR(part));
                return true;
@@ -540,10 +558,10 @@ int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
        }
 
        /*
-        * Partitions are not supported on zoned block devices.
+        * Partitions are not supported on host managed zoned block devices.
         */
-       if (bdev_is_zoned(bdev)) {
-               pr_warn("%s: ignoring partition table on zoned block device\n",
+       if (disk->queue->limits.zoned == BLK_ZONED_HM) {
+               pr_warn("%s: ignoring partition table on host managed zoned block device\n",
                        disk->disk_name);
                ret = 0;
                goto out_free_state;
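
The resulting policy reduces to a single predicate; a sketch mirroring the switch above (not an exported helper):

    static bool partitions_allowed(struct gendisk *disk)
    {
            /* BLK_ZONED_HM: refuse outright; BLK_ZONED_HA: allow, but
             * zoned support is turned off; BLK_ZONED_NONE: business as
             * usual. */
            return disk->queue->limits.zoned != BLK_ZONED_HM;
    }
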
index 33f7198..6078064 100644 (file)
@@ -298,6 +298,59 @@ out:
        return status;
 }
 
+struct iort_workaround_oem_info {
+       char oem_id[ACPI_OEM_ID_SIZE + 1];
+       char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+       u32 oem_revision;
+};
+
+static bool apply_id_count_workaround;
+
+static struct iort_workaround_oem_info wa_info[] __initdata = {
+       {
+               .oem_id         = "HISI  ",
+               .oem_table_id   = "HIP07   ",
+               .oem_revision   = 0,
+       }, {
+               .oem_id         = "HISI  ",
+               .oem_table_id   = "HIP08   ",
+               .oem_revision   = 0,
+       }
+};
+
+static void __init
+iort_check_id_count_workaround(struct acpi_table_header *tbl)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+               if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+                   !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+                   wa_info[i].oem_revision == tbl->oem_revision) {
+                       apply_id_count_workaround = true;
+                       pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n");
+                       break;
+               }
+       }
+}
+
+static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map)
+{
+       u32 map_max = map->input_base + map->id_count;
+
+       /*
+        * The IORT specification revision D (Section 3, table 4, page 9) says
+        * Number of IDs = The number of IDs in the range minus one, but the
+        * IORT code ignored the "minus one", and some firmware did that too,
+        * so apply a workaround here to stay compatible with both
+        * spec-compliant and non-spec-compliant firmware.
+        */
+       if (apply_id_count_workaround)
+               map_max--;
+
+       return map_max;
+}
+
 static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
                       u32 *rid_out)
 {
@@ -314,8 +367,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
                return -ENXIO;
        }
 
-       if (rid_in < map->input_base ||
-           (rid_in >= map->input_base + map->id_count))
+       if (rid_in < map->input_base || rid_in > iort_get_map_max(map))
                return -ENXIO;
 
        *rid_out = map->output_base + (rid_in - map->input_base);
@@ -1631,5 +1683,6 @@ void __init acpi_iort_init(void)
                return;
        }
 
+       iort_check_id_count_workaround(iort_table);
        iort_init_platform_devices();
 }
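
A worked example of what the workaround changes (values illustrative):

    /*
     * Example: input_base = 0x0, id_count = 0x100
     *
     *   spec-compliant firmware: id_count is "N - 1", so RIDs
     *                            0x0..0x100 are valid (257 IDs)
     *   HIP07/HIP08 firmware:    id_count is the full count N, so only
     *                            RIDs 0x0..0xFF are valid (256 IDs) and
     *                            map_max is decremented accordingly
     */
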
index aad00d2..cc87004 100644 (file)
@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
                        }
                        if (!to) {
                                printk ("No more free channels for FS50..\n");
+                               kfree(vcc);
                                return -EBUSY;
                        }
                        vcc->channo = dev->channo;
@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
                        if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
                            ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
                                printk ("Channel is in use for FS155.\n");
+                               kfree(vcc);
                                return -EBUSY;
                        }
                }
@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
                            tc, sizeof (struct fs_transmit_config));
                if (!tc) {
                        fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
+                       kfree(vcc);
                        return -ENOMEM;
                }
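
The fix plugs each early-return path with its own kfree(); for comparison, the equivalent single-exit unwind pattern (purely illustrative, claim_channel is a made-up helper):

    static int example_open(void)
    {
            struct fs_vcc *vcc;
            int err;

            vcc = kmalloc(sizeof(*vcc), GFP_KERNEL);
            if (!vcc)
                    return -ENOMEM;

            err = claim_channel(vcc);       /* hypothetical helper */
            if (err)
                    goto err_free_vcc;

            return 0;

    err_free_vcc:
            kfree(vcc);
            return err;
    }
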
 
index ac9b31c..008f8da 100644 (file)
@@ -43,7 +43,7 @@ static int regmap_smbus_byte_reg_write(void *context, unsigned int reg,
        return i2c_smbus_write_byte_data(i2c, reg, val);
 }
 
-static struct regmap_bus regmap_smbus_byte = {
+static const struct regmap_bus regmap_smbus_byte = {
        .reg_write = regmap_smbus_byte_reg_write,
        .reg_read = regmap_smbus_byte_reg_read,
 };
@@ -79,7 +79,7 @@ static int regmap_smbus_word_reg_write(void *context, unsigned int reg,
        return i2c_smbus_write_word_data(i2c, reg, val);
 }
 
-static struct regmap_bus regmap_smbus_word = {
+static const struct regmap_bus regmap_smbus_word = {
        .reg_write = regmap_smbus_word_reg_write,
        .reg_read = regmap_smbus_word_reg_read,
 };
@@ -115,7 +115,7 @@ static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
        return i2c_smbus_write_word_swapped(i2c, reg, val);
 }
 
-static struct regmap_bus regmap_smbus_word_swapped = {
+static const struct regmap_bus regmap_smbus_word_swapped = {
        .reg_write = regmap_smbus_word_write_swapped,
        .reg_read = regmap_smbus_word_read_swapped,
 };
@@ -197,7 +197,7 @@ static int regmap_i2c_read(void *context,
                return -EIO;
 }
 
-static struct regmap_bus regmap_i2c = {
+static const struct regmap_bus regmap_i2c = {
        .write = regmap_i2c_write,
        .gather_write = regmap_i2c_gather_write,
        .read = regmap_i2c_read,
@@ -239,7 +239,7 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
                return -EIO;
 }
 
-static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
        .write = regmap_i2c_smbus_i2c_write,
        .read = regmap_i2c_smbus_i2c_read,
        .max_raw_read = I2C_SMBUS_BLOCK_MAX,
index 19f57cc..59f911e 100644 (file)
@@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 
        WARN_ON(!map->bus);
 
-       /* Check for unwritable registers before we start */
-       for (i = 0; i < val_len / map->format.val_bytes; i++)
-               if (!regmap_writeable(map,
-                                    reg + regmap_get_offset(map, i)))
-                       return -EINVAL;
+       /* Check for unwritable or noinc registers in range
+        * before we start
+        */
+       if (!regmap_writeable_noinc(map, reg)) {
+               for (i = 0; i < val_len / map->format.val_bytes; i++) {
+                       unsigned int element =
+                               reg + regmap_get_offset(map, i);
+                       if (!regmap_writeable(map, element) ||
+                               regmap_writeable_noinc(map, element))
+                               return -EINVAL;
+               }
+       }
 
        if (!map->cache_bypass && map->format.parse_val) {
                unsigned int ival;
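
Registers reach regmap_writeable_noinc() via the driver's config; a sketch of declaring a FIFO data port so that bulk raw writes reject it (the register name is hypothetical):

    static bool mydrv_writeable_noinc(struct device *dev, unsigned int reg)
    {
            return reg == MYDRV_FIFO_DATA;  /* hypothetical data port */
    }

    static const struct regmap_config mydrv_regmap_cfg = {
            .reg_bits            = 8,
            .val_bits            = 8,
            .writeable_noinc_reg = mydrv_writeable_noinc,
            /* Streams to MYDRV_FIFO_DATA must use regmap_noinc_write(). */
    };
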
index 3b53b3e..d52bf4d 100644 (file)
@@ -310,7 +310,17 @@ static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(timeouts);
 
-static struct attribute *tpm_dev_attrs[] = {
+static ssize_t tpm_version_major_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct tpm_chip *chip = to_tpm_chip(dev);
+
+       return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2
+                      ? "2" : "1");
+}
+static DEVICE_ATTR_RO(tpm_version_major);
+
+static struct attribute *tpm1_dev_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
@@ -321,18 +331,28 @@ static struct attribute *tpm_dev_attrs[] = {
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr,
+       &dev_attr_tpm_version_major.attr,
        NULL,
 };
 
-static const struct attribute_group tpm_dev_group = {
-       .attrs = tpm_dev_attrs,
+static struct attribute *tpm2_dev_attrs[] = {
+       &dev_attr_tpm_version_major.attr,
+       NULL
+};
+
+static const struct attribute_group tpm1_dev_group = {
+       .attrs = tpm1_dev_attrs,
+};
+
+static const struct attribute_group tpm2_dev_group = {
+       .attrs = tpm2_dev_attrs,
 };
 
 void tpm_sysfs_add_device(struct tpm_chip *chip)
 {
-       if (chip->flags & TPM_CHIP_FLAG_TPM2)
-               return;
-
        WARN_ON(chip->groups_cnt != 0);
-       chip->groups[chip->groups_cnt++] = &tpm_dev_group;
+       if (chip->flags & TPM_CHIP_FLAG_TPM2)
+               chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
+       else
+               chip->groups[chip->groups_cnt++] = &tpm1_dev_group;
 }
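
With both attribute groups in place, userspace can determine the TPM family without sending a command, e.g. by reading /sys/class/tpm/tpm0/tpm_version_major (assuming the usual tpm0 device name), which returns "1" or "2".
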
index 6fa1eba..5142da4 100644 (file)
@@ -239,6 +239,14 @@ config FSL_RAID
          the capability to offload memcpy, xor and pq computation
          for raid5/6.
 
+config HISI_DMA
+       tristate "HiSilicon DMA Engine support"
+       depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Support the HiSilicon Kunpeng DMA engine.
+
 config IMG_MDC_DMA
        tristate "IMG MDC support"
        depends on MIPS || COMPILE_TEST
@@ -273,6 +281,19 @@ config INTEL_IDMA64
          Enable DMA support for Intel Low Power Subsystem such as found on
          Intel Skylake PCH.
 
+config INTEL_IDXD
+       tristate "Intel Data Accelerators support"
+       depends on PCI && X86_64
+       select DMA_ENGINE
+       select SBITMAP
+       help
+         Enable support for the Intel(R) data accelerators present
+         in Intel Xeon CPUs.
+
+         Say Y if you have such a platform.
+
+         If unsure, say N.
+
 config INTEL_IOATDMA
        tristate "Intel I/OAT DMA support"
        depends on PCI && X86_64
@@ -497,6 +518,15 @@ config PXA_DMA
          16 to 32 channels for peripheral to memory or memory to memory
          transfers.
 
+config PLX_DMA
+       tristate "PLX ExpressLane PEX Switch DMA Engine Support"
+       depends on PCI
+       select DMA_ENGINE
+       help
+         Some PLX ExpressLane PCI Switches support additional DMA engines.
+         These are exposed via extra functions on the switch's
+         upstream port. Each function exposes one DMA channel.
+
 config SIRF_DMA
        tristate "CSR SiRFprimaII/SiRFmarco DMA support"
        depends on ARCH_SIRF
index 42d7e2f..1d90839 100644 (file)
@@ -35,12 +35,14 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
 obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
 obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+obj-$(CONFIG_HISI_DMA) += hisi_dma.o
 obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_INTEL_IDMA64) += idma64.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+obj-$(CONFIG_INTEL_IDXD) += idxd/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
@@ -59,6 +61,7 @@ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
 obj-$(CONFIG_OWL_DMA) += owl-dma.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PLX_DMA) += plx_dma.o
 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
 obj-$(CONFIG_PXA_DMA) += pxa_dma.o
 obj-$(CONFIG_RENESAS_DMA) += sh/
index e4c593f..4768ef2 100644 (file)
@@ -797,10 +797,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 
        /* stop DMA activity */
        if (c->desc) {
-               if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
-                       vchan_terminate_vdesc(&c->desc->vd);
-               else
-                       vchan_vdesc_fini(&c->desc->vd);
+               vchan_terminate_vdesc(&c->desc->vd);
                c->desc = NULL;
                bcm2835_dma_abort(c);
        }
index a0ee404..f1d149e 100644 (file)
@@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
        struct dma_device *dma_dev;
        struct axi_dmac *dmac;
        struct resource *res;
+       struct regmap *regmap;
        int ret;
 
        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dmac);
 
-       devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
+       regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
+                &axi_dmac_regmap_config);
+       if (IS_ERR(regmap)) {
+               ret = PTR_ERR(regmap);
+               goto err_free_irq;
+       }
 
        return 0;
 
+err_free_irq:
+       free_irq(dmac->irq, dmac);
 err_unregister_of:
        of_dma_controller_free(pdev->dev.of_node);
 err_unregister_device:
index 44af435..448f663 100644 (file)
@@ -1021,12 +1021,19 @@ static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
        .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
 };
 
+static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
+       .nb_channels = 32,
+       .transfer_ord_max = 7,
+       .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
 static const struct of_device_id jz4780_dma_dt_match[] = {
        { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
        { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
        { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
        { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
        { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
+       { .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
        {},
 };
 MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
index 03ac4b9..f3ef4ed 100644 (file)
@@ -60,6 +60,8 @@ static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
 
+#define DMA_SLAVE_NAME "slave"
+
 /**
  * dev_to_dma_chan - convert a device pointer to its sysfs container object
  * @dev - device node
@@ -164,11 +166,152 @@ static struct class dma_devclass = {
 
 /* --- client and device registration --- */
 
-#define dma_device_satisfies_mask(device, mask) \
-       __dma_device_satisfies_mask((device), &(mask))
-static int
-__dma_device_satisfies_mask(struct dma_device *device,
-                           const dma_cap_mask_t *want)
+/**
+ * dma_cap_mask_all - enable iteration over all operation types
+ */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan - associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+       struct dma_chan *chan;
+};
+
+/**
+ * channel_table - percpu lookup table for memory-to-memory offload providers
+ */
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
+{
+       enum dma_transaction_type cap;
+       int err = 0;
+
+       bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+       /* 'interrupt', 'private', and 'slave' are channel capabilities,
+        * but are not associated with an operation so they do not need
+        * an entry in the channel_table
+        */
+       clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+       clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+       clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+       for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+               channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+               if (!channel_table[cap]) {
+                       err = -ENOMEM;
+                       break;
+               }
+       }
+
+       if (err) {
+               pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
+               for_each_dma_cap_mask(cap, dma_cap_mask_all)
+                       free_percpu(channel_table[cap]);
+       }
+
+       return err;
+}
+arch_initcall(dma_channel_table_init);
+
+/**
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as
+ *     the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+       int node = dev_to_node(chan->device->dev);
+       return node == NUMA_NO_NODE ||
+               cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as
+ *     the cpu
+ * @cap: capability to match
+ * @cpu: cpu index which the channel should be close to
+ *
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
+{
+       struct dma_device *device;
+       struct dma_chan *chan;
+       struct dma_chan *min = NULL;
+       struct dma_chan *localmin = NULL;
+
+       list_for_each_entry(device, &dma_device_list, global_node) {
+               if (!dma_has_cap(cap, device->cap_mask) ||
+                   dma_has_cap(DMA_PRIVATE, device->cap_mask))
+                       continue;
+               list_for_each_entry(chan, &device->channels, device_node) {
+                       if (!chan->client_count)
+                               continue;
+                       if (!min || chan->table_count < min->table_count)
+                               min = chan;
+
+                       if (dma_chan_is_local(chan, cpu))
+                               if (!localmin ||
+                                   chan->table_count < localmin->table_count)
+                                       localmin = chan;
+               }
+       }
+
+       chan = localmin ? localmin : min;
+
+       if (chan)
+               chan->table_count++;
+
+       return chan;
+}
+
+/**
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for cpu isolation (each cpu gets a dedicated channel for an
+ * operation type) in the SMP case,  and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case.  Must be called under
+ * dma_list_mutex.
+ */
+static void dma_channel_rebalance(void)
+{
+       struct dma_chan *chan;
+       struct dma_device *device;
+       int cpu;
+       int cap;
+
+       /* undo the last distribution */
+       for_each_dma_cap_mask(cap, dma_cap_mask_all)
+               for_each_possible_cpu(cpu)
+                       per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+       list_for_each_entry(device, &dma_device_list, global_node) {
+               if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+                       continue;
+               list_for_each_entry(chan, &device->channels, device_node)
+                       chan->table_count = 0;
+       }
+
+       /* don't populate the channel_table if no clients are available */
+       if (!dmaengine_ref_count)
+               return;
+
+       /* redistribute available channels */
+       for_each_dma_cap_mask(cap, dma_cap_mask_all)
+               for_each_online_cpu(cpu) {
+                       chan = min_chan(cap, cpu);
+                       per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+               }
+}
+
+static int dma_device_satisfies_mask(struct dma_device *device,
+                                    const dma_cap_mask_t *want)
 {
        dma_cap_mask_t has;
 
@@ -179,7 +322,7 @@ __dma_device_satisfies_mask(struct dma_device *device,
 
 static struct module *dma_chan_to_owner(struct dma_chan *chan)
 {
-       return chan->device->dev->driver->owner;
+       return chan->device->owner;
 }
 
 /**
@@ -198,6 +341,23 @@ static void balance_ref_count(struct dma_chan *chan)
        }
 }
 
+static void dma_device_release(struct kref *ref)
+{
+       struct dma_device *device = container_of(ref, struct dma_device, ref);
+
+       list_del_rcu(&device->global_node);
+       dma_channel_rebalance();
+
+       if (device->device_release)
+               device->device_release(device);
+}
+
+static void dma_device_put(struct dma_device *device)
+{
+       lockdep_assert_held(&dma_list_mutex);
+       kref_put(&device->ref, dma_device_release);
+}
+
 /**
  * dma_chan_get - try to grab a dma channel's parent driver module
  * @chan - channel to grab
@@ -218,6 +378,12 @@ static int dma_chan_get(struct dma_chan *chan)
        if (!try_module_get(owner))
                return -ENODEV;
 
+       ret = kref_get_unless_zero(&chan->device->ref);
+       if (!ret) {
+               ret = -ENODEV;
+               goto module_put_out;
+       }
+
        /* allocate upon first client reference */
        if (chan->device->device_alloc_chan_resources) {
                ret = chan->device->device_alloc_chan_resources(chan);
@@ -233,6 +399,8 @@ out:
        return 0;
 
 err_out:
+       dma_device_put(chan->device);
+module_put_out:
        module_put(owner);
        return ret;
 }
@@ -250,7 +418,6 @@ static void dma_chan_put(struct dma_chan *chan)
                return;
 
        chan->client_count--;
-       module_put(dma_chan_to_owner(chan));
 
        /* This channel is not in use anymore, free it */
        if (!chan->client_count && chan->device->device_free_chan_resources) {
@@ -265,6 +432,9 @@ static void dma_chan_put(struct dma_chan *chan)
                chan->router = NULL;
                chan->route_data = NULL;
        }
+
+       dma_device_put(chan->device);
+       module_put(dma_chan_to_owner(chan));
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -288,57 +458,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 }
 EXPORT_SYMBOL(dma_sync_wait);
 
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
- */
-struct dma_chan_tbl_ent {
-       struct dma_chan *chan;
-};
-
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
-static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
-
-static int __init dma_channel_table_init(void)
-{
-       enum dma_transaction_type cap;
-       int err = 0;
-
-       bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
-       /* 'interrupt', 'private', and 'slave' are channel capabilities,
-        * but are not associated with an operation so they do not need
-        * an entry in the channel_table
-        */
-       clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
-       clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
-       clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
-
-       for_each_dma_cap_mask(cap, dma_cap_mask_all) {
-               channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
-               if (!channel_table[cap]) {
-                       err = -ENOMEM;
-                       break;
-               }
-       }
-
-       if (err) {
-               pr_err("initialization failure\n");
-               for_each_dma_cap_mask(cap, dma_cap_mask_all)
-                       free_percpu(channel_table[cap]);
-       }
-
-       return err;
-}
-arch_initcall(dma_channel_table_init);
-
 /**
  * dma_find_channel - find a channel to carry out the operation
  * @tx_type: transaction type
@@ -369,97 +488,6 @@ void dma_issue_pending_all(void)
 }
 EXPORT_SYMBOL(dma_issue_pending_all);
 
-/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
- */
-static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
-{
-       int node = dev_to_node(chan->device->dev);
-       return node == NUMA_NO_NODE ||
-               cpumask_test_cpu(cpu, cpumask_of_node(node));
-}
-
-/**
- * min_chan - returns the channel with min count and in the same numa-node as the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
- *
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
- * reference count is taken into account.
- * Must be called under dma_list_mutex.
- */
-static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
-{
-       struct dma_device *device;
-       struct dma_chan *chan;
-       struct dma_chan *min = NULL;
-       struct dma_chan *localmin = NULL;
-
-       list_for_each_entry(device, &dma_device_list, global_node) {
-               if (!dma_has_cap(cap, device->cap_mask) ||
-                   dma_has_cap(DMA_PRIVATE, device->cap_mask))
-                       continue;
-               list_for_each_entry(chan, &device->channels, device_node) {
-                       if (!chan->client_count)
-                               continue;
-                       if (!min || chan->table_count < min->table_count)
-                               min = chan;
-
-                       if (dma_chan_is_local(chan, cpu))
-                               if (!localmin ||
-                                   chan->table_count < localmin->table_count)
-                                       localmin = chan;
-               }
-       }
-
-       chan = localmin ? localmin : min;
-
-       if (chan)
-               chan->table_count++;
-
-       return chan;
-}
-
-/**
- * dma_channel_rebalance - redistribute the available channels
- *
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case,  and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case.  Must be called under
- * dma_list_mutex.
- */
-static void dma_channel_rebalance(void)
-{
-       struct dma_chan *chan;
-       struct dma_device *device;
-       int cpu;
-       int cap;
-
-       /* undo the last distribution */
-       for_each_dma_cap_mask(cap, dma_cap_mask_all)
-               for_each_possible_cpu(cpu)
-                       per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
-
-       list_for_each_entry(device, &dma_device_list, global_node) {
-               if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
-                       continue;
-               list_for_each_entry(chan, &device->channels, device_node)
-                       chan->table_count = 0;
-       }
-
-       /* don't populate the channel_table if no clients are available */
-       if (!dmaengine_ref_count)
-               return;
-
-       /* redistribute available channels */
-       for_each_dma_cap_mask(cap, dma_cap_mask_all)
-               for_each_online_cpu(cpu) {
-                       chan = min_chan(cap, cpu);
-                       per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
-               }
-}
-
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 {
        struct dma_device *device;
@@ -502,7 +530,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 {
        struct dma_chan *chan;
 
-       if (mask && !__dma_device_satisfies_mask(dev, mask)) {
+       if (mask && !dma_device_satisfies_mask(dev, mask)) {
                dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
                return NULL;
        }
@@ -704,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
        if (has_acpi_companion(dev) && !chan)
                chan = acpi_dma_request_slave_chan_by_name(dev, name);
 
-       if (chan) {
-               /* Valid channel found or requester needs to be deferred */
-               if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
-                       return chan;
-       }
+       if (PTR_ERR(chan) == -EPROBE_DEFER)
+               return chan;
+
+       if (!IS_ERR_OR_NULL(chan))
+               goto found;
 
        /* Try to find the channel via the DMA filter map(s) */
        mutex_lock(&dma_list_mutex);
@@ -728,7 +756,23 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
        }
        mutex_unlock(&dma_list_mutex);
 
-       return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+       if (!IS_ERR_OR_NULL(chan))
+               goto found;
+
+       return ERR_PTR(-EPROBE_DEFER);
+
+found:
+       chan->slave = dev;
+       chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
+       if (!chan->name)
+               return ERR_PTR(-ENOMEM);
+
+       if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
+                             DMA_SLAVE_NAME))
+               dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+       if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
+               dev_err(dev, "Cannot create DMA %s symlink\n", chan->name);
+       return chan;
 }
 EXPORT_SYMBOL_GPL(dma_request_chan);
 
@@ -786,6 +830,13 @@ void dma_release_channel(struct dma_chan *chan)
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+       if (chan->slave) {
+               sysfs_remove_link(&chan->slave->kobj, chan->name);
+               kfree(chan->name);
+               chan->name = NULL;
+               chan->slave = NULL;
+       }
+       sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
        mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
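
From a consumer's point of view the calling pattern is unchanged; the symlinks appear and disappear as a side effect. A sketch (mydrv names hypothetical):

    static int mydrv_probe_dma(struct device *dev, struct mydrv *md)
    {
            md->rx_chan = dma_request_chan(dev, "rx");
            if (IS_ERR(md->rx_chan))
                    return PTR_ERR(md->rx_chan);    /* may be -EPROBE_DEFER */

            /* sysfs now links <consumer>/dma:rx and <chan>/slave. */
            return 0;
    }

    static void mydrv_remove_dma(struct mydrv *md)
    {
            dma_release_channel(md->rx_chan);       /* also drops the links */
    }
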
@@ -834,14 +885,14 @@ EXPORT_SYMBOL(dmaengine_get);
  */
 void dmaengine_put(void)
 {
-       struct dma_device *device;
+       struct dma_device *device, *_d;
        struct dma_chan *chan;
 
        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
-       list_for_each_entry(device, &dma_device_list, global_node) {
+       list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
@@ -900,15 +951,115 @@ static int get_dma_id(struct dma_device *device)
        return 0;
 }
 
+static int __dma_async_device_channel_register(struct dma_device *device,
+                                              struct dma_chan *chan,
+                                              int chan_id)
+{
+       int rc = 0;
+       int chancnt = device->chancnt;
+       atomic_t *idr_ref;
+       struct dma_chan *tchan;
+
+       tchan = list_first_entry_or_null(&device->channels,
+                                        struct dma_chan, device_node);
+       if (tchan->dev) {
+               idr_ref = tchan->dev->idr_ref;
+       } else {
+               idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+               if (!idr_ref)
+                       return -ENOMEM;
+               atomic_set(idr_ref, 0);
+       }
+
+       chan->local = alloc_percpu(typeof(*chan->local));
+       if (!chan->local)
+               goto err_out;
+       chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+       if (!chan->dev) {
+               free_percpu(chan->local);
+               chan->local = NULL;
+               goto err_out;
+       }
+
+       /*
+        * When the chan_id is a negative value, we are dynamically adding
+        * the channel. Otherwise we are static enumerating.
+        */
+       chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+       chan->dev->device.class = &dma_devclass;
+       chan->dev->device.parent = device->dev;
+       chan->dev->chan = chan;
+       chan->dev->idr_ref = idr_ref;
+       chan->dev->dev_id = device->dev_id;
+       atomic_inc(idr_ref);
+       dev_set_name(&chan->dev->device, "dma%dchan%d",
+                    device->dev_id, chan->chan_id);
+
+       rc = device_register(&chan->dev->device);
+       if (rc)
+               goto err_out;
+       chan->client_count = 0;
+       device->chancnt = chan->chan_id + 1;
+
+       return 0;
+
+ err_out:
+       free_percpu(chan->local);
+       kfree(chan->dev);
+       if (atomic_dec_return(idr_ref) == 0)
+               kfree(idr_ref);
+       return rc;
+}
+
+int dma_async_device_channel_register(struct dma_device *device,
+                                     struct dma_chan *chan)
+{
+       int rc;
+
+       rc = __dma_async_device_channel_register(device, chan, -1);
+       if (rc < 0)
+               return rc;
+
+       dma_channel_rebalance();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+
+static void __dma_async_device_channel_unregister(struct dma_device *device,
+                                                 struct dma_chan *chan)
+{
+       WARN_ONCE(!device->device_release && chan->client_count,
+                 "%s called while %d clients hold a reference\n",
+                 __func__, chan->client_count);
+       mutex_lock(&dma_list_mutex);
+       list_del(&chan->device_node);
+       device->chancnt--;
+       chan->dev->chan = NULL;
+       mutex_unlock(&dma_list_mutex);
+       device_unregister(&chan->dev->device);
+       free_percpu(chan->local);
+}
+
+void dma_async_device_channel_unregister(struct dma_device *device,
+                                        struct dma_chan *chan)
+{
+       __dma_async_device_channel_unregister(device, chan);
+       dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
+
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
+ *
+ * After calling this routine the structure should not be freed except in the
+ * device_release() callback which will be called after
+ * dma_async_device_unregister() is called and no further references are taken.
  */
 int dma_async_device_register(struct dma_device *device)
 {
-       int chancnt = 0, rc;
+       int rc, i = 0;
        struct dma_chan* chan;
-       atomic_t *idr_ref;
 
        if (!device)
                return -ENODEV;
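
Providers opt in to safe unbinding by supplying device_release before registering; a minimal sketch (mydrv names hypothetical):

    static void mydrv_dma_release(struct dma_device *ddev)
    {
            struct mydrv *md = container_of(ddev, struct mydrv, dma);

            /* Last reference dropped: the embedding structure may be
             * freed now, and not before. */
            kfree(md);
    }

    static int mydrv_register(struct mydrv *md)
    {
            md->dma.device_release = mydrv_dma_release;
            return dma_async_device_register(&md->dma);
    }
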
@@ -919,6 +1070,8 @@ int dma_async_device_register(struct dma_device *device)
                return -EIO;
        }
 
+       device->owner = device->dev->driver->owner;
+
        if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
@@ -994,65 +1147,29 @@ int dma_async_device_register(struct dma_device *device)
                return -EIO;
        }
 
+       if (!device->device_release)
+               dev_warn(device->dev,
+                        "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+       kref_init(&device->ref);
+
        /* note: this only matters in the
         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 
-       idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-       if (!idr_ref)
-               return -ENOMEM;
        rc = get_dma_id(device);
-       if (rc != 0) {
-               kfree(idr_ref);
+       if (rc != 0)
                return rc;
-       }
-
-       atomic_set(idr_ref, 0);
 
        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
-               rc = -ENOMEM;
-               chan->local = alloc_percpu(typeof(*chan->local));
-               if (chan->local == NULL)
-                       goto err_out;
-               chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
-               if (chan->dev == NULL) {
-                       free_percpu(chan->local);
-                       chan->local = NULL;
+               rc = __dma_async_device_channel_register(device, chan, i++);
+               if (rc < 0)
                        goto err_out;
-               }
-
-               chan->chan_id = chancnt++;
-               chan->dev->device.class = &dma_devclass;
-               chan->dev->device.parent = device->dev;
-               chan->dev->chan = chan;
-               chan->dev->idr_ref = idr_ref;
-               chan->dev->dev_id = device->dev_id;
-               atomic_inc(idr_ref);
-               dev_set_name(&chan->dev->device, "dma%dchan%d",
-                            device->dev_id, chan->chan_id);
-
-               rc = device_register(&chan->dev->device);
-               if (rc) {
-                       free_percpu(chan->local);
-                       chan->local = NULL;
-                       kfree(chan->dev);
-                       atomic_dec(idr_ref);
-                       goto err_out;
-               }
-               chan->client_count = 0;
-       }
-
-       if (!chancnt) {
-               dev_err(device->dev, "%s: device has no channels!\n", __func__);
-               rc = -ENODEV;
-               goto err_out;
        }
 
-       device->chancnt = chancnt;
-
        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -1080,9 +1197,8 @@ int dma_async_device_register(struct dma_device *device)
 
 err_out:
        /* if we never registered a channel just release the idr */
-       if (atomic_read(idr_ref) == 0) {
+       if (!device->chancnt) {
                ida_free(&dma_ida, device->dev_id);
-               kfree(idr_ref);
                return rc;
        }
 
@@ -1108,23 +1224,20 @@ EXPORT_SYMBOL(dma_async_device_register);
  */
 void dma_async_device_unregister(struct dma_device *device)
 {
-       struct dma_chan *chan;
+       struct dma_chan *chan, *n;
+
+       list_for_each_entry_safe(chan, n, &device->channels, device_node)
+               __dma_async_device_channel_unregister(device, chan);
 
        mutex_lock(&dma_list_mutex);
-       list_del_rcu(&device->global_node);
+       /*
+        * setting DMA_PRIVATE ensures the device being torn down will not
+        * be used in the channel_table
+        */
+       dma_cap_set(DMA_PRIVATE, device->cap_mask);
        dma_channel_rebalance();
+       dma_device_put(device);
        mutex_unlock(&dma_list_mutex);
-
-       list_for_each_entry(chan, &device->channels, device_node) {
-               WARN_ONCE(chan->client_count,
-                         "%s called while %d clients hold a reference\n",
-                         __func__, chan->client_count);
-               mutex_lock(&dma_list_mutex);
-               chan->dev->chan = NULL;
-               mutex_unlock(&dma_list_mutex);
-               device_unregister(&chan->dev->device);
-               free_percpu(chan->local);
-       }
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
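Teardown now defers the final free to the driver: dma_async_device_unregister() drops the reference taken by the kref_init() in dma_async_device_register(), so the memory backing the dma_device must stay valid until device_release() runs. A hedged sketch of the resulting driver pattern (struct my_dma and my_dma_release are illustrative, not part of this patch):

/* Sketch: free driver state only from the device_release() callback. */
struct my_dma {
	struct dma_device dma;
	/* ... driver-private state ... */
};

static void my_dma_release(struct dma_device *device)
{
	struct my_dma *md = container_of(device, struct my_dma, dma);

	kfree(md);	/* safe: the last dmaengine reference is gone */
}

/* in probe: md->dma.device_release = my_dma_release; */
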
 
@@ -1302,6 +1415,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
+static inline int desc_check_and_set_metadata_mode(
+       struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
+{
+       /* Make sure that the metadata mode is not mixed */
+       if (!desc->desc_metadata_mode) {
+               if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
+                       desc->desc_metadata_mode = mode;
+               else
+                       return -ENOTSUPP;
+       } else if (desc->desc_metadata_mode != mode) {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+                                  void *data, size_t len)
+{
+       int ret;
+
+       if (!desc)
+               return -EINVAL;
+
+       ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
+       if (ret)
+               return ret;
+
+       if (!desc->metadata_ops || !desc->metadata_ops->attach)
+               return -ENOTSUPP;
+
+       return desc->metadata_ops->attach(desc, data, len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
+
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+                                     size_t *payload_len, size_t *max_len)
+{
+       int ret;
+
+       if (!desc)
+               return ERR_PTR(-EINVAL);
+
+       ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+       if (ret)
+               return ERR_PTR(ret);
+
+       if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
+               return ERR_PTR(-ENOTSUPP);
+
+       return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
+
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+                                   size_t payload_len)
+{
+       int ret;
+
+       if (!desc)
+               return -EINVAL;
+
+       ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+       if (ret)
+               return ret;
+
+       if (!desc->metadata_ops || !desc->metadata_ops->set_len)
+               return -ENOTSUPP;
+
+       return desc->metadata_ops->set_len(desc, payload_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
+
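Together these helpers pin a descriptor to one metadata mode and dispatch into the provider's metadata_ops. For DESC_METADATA_ENGINE a client fetches the engine-owned buffer, fills it, and records the payload length before submitting. A rough client-side sketch; my_attach_engine_metadata is an illustrative wrapper, not a kernel API:

/* Sketch: engine-mode TX metadata, write then set length before submit. */
static int my_attach_engine_metadata(struct dma_async_tx_descriptor *desc,
				     const void *buf, size_t len)
{
	size_t payload_len, max_len;	/* payload_len: current length, unused here */
	void *mdata;
	int ret;

	mdata = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR(mdata))
		return PTR_ERR(mdata);
	if (len > max_len)
		return -EINVAL;

	memcpy(mdata, buf, len);
	ret = dmaengine_desc_set_metadata_len(desc, len);
	if (ret)
		return ret;

	dmaengine_submit(desc);
	return 0;
}
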
 /* dma_wait_for_async_tx - spin wait for a transaction to complete
  * @tx: in-flight transaction to wait on
  */
@@ -1373,5 +1559,3 @@ static int __init dma_bus_init(void)
        return class_register(&dma_devclass);
 }
 arch_initcall(dma_bus_init);
-
-
index 501c0b0..e8a320c 100644 (file)
@@ -77,6 +77,7 @@ static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
                state->last = complete;
                state->used = used;
                state->residue = 0;
+               state->in_flight_bytes = 0;
        }
        return dma_async_is_complete(cookie, complete, used);
 }
@@ -87,6 +88,13 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
                state->residue = residue;
 }
 
+static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
+                                          u32 in_flight_bytes)
+{
+       if (state)
+               state->in_flight_bytes = in_flight_bytes;
+}
+
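dma_set_in_flight_bytes() mirrors dma_set_residue() and lets a driver report, alongside the residue, how much data the hardware is currently holding in flight. A driver's tx_status callback might use it roughly like this (my_tx_status, my_residue and my_in_flight are placeholders for driver-specific accounting):

/* Sketch: report residue plus bytes currently in flight in the hardware. */
static enum dma_status my_tx_status(struct dma_chan *c, dma_cookie_t cookie,
				    struct dma_tx_state *state)
{
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	dma_set_residue(state, my_residue(c, cookie));
	dma_set_in_flight_bytes(state, my_in_flight(c, cookie));

	return ret;
}
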
 struct dmaengine_desc_callback {
        dma_async_tx_callback callback;
        dma_async_tx_callback_result callback_result;
@@ -171,4 +179,7 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
        return (cb->callback) ? true : false;
 }
 
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+
 #endif
index a1ce307..14c1ac2 100644 (file)
@@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
 
        vchan_get_all_descriptors(&chan->vc, &head);
 
-       /*
-        * As vchan_dma_desc_free_list can access to desc_allocated list
-        * we need to call it in vc.lock context.
-        */
-       vchan_dma_desc_free_list(&chan->vc, &head);
-
        spin_unlock_irqrestore(&chan->vc.lock, flags);
 
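+       /* the list was detached under vc.lock; freeing it needs no lock */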
+       vchan_dma_desc_free_list(&chan->vc, &head);
+
        dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
 
        return 0;
index b1a7ca9..5697c36 100644 (file)
@@ -109,10 +109,15 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
        u32 ch = fsl_chan->vchan.chan.chan_id;
        void __iomem *muxaddr;
        unsigned int chans_per_mux, ch_off;
+       int endian_diff[4] = {3, 1, -1, -3};
        u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
 
        chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
        ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+
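+       /*
+        * Big-endian eDMA integrations (e.g. LS1028A) byte-swap the 8-bit
+        * mux registers inside each 32-bit word, so remap channel offsets
+        * 0,1,2,3 -> 3,2,1,0 within every group of four.
+        */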
+       if (fsl_chan->edma->drvdata->mux_swap)
+               ch_off += endian_diff[ch_off % 4];
+
        muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
        slot = EDMAMUX_CHCFG_SOURCE(slot);
 
index 5eaa290..67e4225 100644 (file)
@@ -147,6 +147,7 @@ struct fsl_edma_drvdata {
        enum edma_version       version;
        u32                     dmamuxs;
        bool                    has_dmaclk;
+       bool                    mux_swap;
        int                     (*setup_irq)(struct platform_device *pdev,
                                             struct fsl_edma_engine *fsl_edma);
 };
index b626c06..eff7ebd 100644 (file)
@@ -233,6 +233,13 @@ static struct fsl_edma_drvdata vf610_data = {
        .setup_irq = fsl_edma_irq_init,
 };
 
+static struct fsl_edma_drvdata ls1028a_data = {
+       .version = v1,
+       .dmamuxs = DMAMUX_NR,
+       .mux_swap = true,
+       .setup_irq = fsl_edma_irq_init,
+};
+
 static struct fsl_edma_drvdata imx7ulp_data = {
        .version = v3,
        .dmamuxs = 1,
@@ -242,6 +249,7 @@ static struct fsl_edma_drvdata imx7ulp_data = {
 
 static const struct of_device_id fsl_edma_dt_ids[] = {
        { .compatible = "fsl,vf610-edma", .data = &vf610_data},
+       { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
        { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
        { /* sentinel */ }
 };
index 8979208..95cc025 100644 (file)
@@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
 
        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
 
-       if (!fsl_queue->comp_pool && !fsl_queue->comp_pool)
+       if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
                return;
 
        list_for_each_entry_safe(comp_temp, _comp_temp,
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
new file mode 100644 (file)
index 0000000..ed36192
--- /dev/null
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2019 HiSilicon Limited. */
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include "virt-dma.h"
+
+#define HISI_DMA_SQ_BASE_L             0x0
+#define HISI_DMA_SQ_BASE_H             0x4
+#define HISI_DMA_SQ_DEPTH              0x8
+#define HISI_DMA_SQ_TAIL_PTR           0xc
+#define HISI_DMA_CQ_BASE_L             0x10
+#define HISI_DMA_CQ_BASE_H             0x14
+#define HISI_DMA_CQ_DEPTH              0x18
+#define HISI_DMA_CQ_HEAD_PTR           0x1c
+#define HISI_DMA_CTRL0                 0x20
+#define HISI_DMA_CTRL0_QUEUE_EN_S      0
+#define HISI_DMA_CTRL0_QUEUE_PAUSE_S   4
+#define HISI_DMA_CTRL1                 0x24
+#define HISI_DMA_CTRL1_QUEUE_RESET_S   0
+#define HISI_DMA_Q_FSM_STS             0x30
+#define HISI_DMA_FSM_STS_MASK          GENMASK(3, 0)
+#define HISI_DMA_INT_STS               0x40
+#define HISI_DMA_INT_STS_MASK          GENMASK(12, 0)
+#define HISI_DMA_INT_MSK               0x44
+#define HISI_DMA_MODE                  0x217c
+#define HISI_DMA_OFFSET                        0x100
+
+#define HISI_DMA_MSI_NUM               30
+#define HISI_DMA_CHAN_NUM              30
+#define HISI_DMA_Q_DEPTH_VAL           1024
+
+#define PCI_BAR_2                      2
+
+enum hisi_dma_mode {
+       EP = 0,
+       RC,
+};
+
+enum hisi_dma_chan_status {
+       DISABLE = -1,
+       IDLE = 0,
+       RUN,
+       CPL,
+       PAUSE,
+       HALT,
+       ABORT,
+       WAIT,
+       BUFFCLR,
+};
+
+struct hisi_dma_sqe {
+       __le32 dw0;
+#define OPCODE_MASK                    GENMASK(3, 0)
+#define OPCODE_SMALL_PACKAGE           0x1
+#define OPCODE_M2M                     0x4
+#define LOCAL_IRQ_EN                   BIT(8)
+#define ATTR_SRC_MASK                  GENMASK(14, 12)
+       __le32 dw1;
+       __le32 dw2;
+#define ATTR_DST_MASK                  GENMASK(26, 24)
+       __le32 length;
+       __le64 src_addr;
+       __le64 dst_addr;
+};
+
+struct hisi_dma_cqe {
+       __le32 rsv0;
+       __le32 rsv1;
+       __le16 sq_head;
+       __le16 rsv2;
+       __le16 rsv3;
+       __le16 w0;
+#define STATUS_MASK                    GENMASK(15, 1)
+#define STATUS_SUCC                    0x0
+#define VALID_BIT                      BIT(0)
+};
+
+struct hisi_dma_desc {
+       struct virt_dma_desc vd;
+       struct hisi_dma_sqe sqe;
+};
+
+struct hisi_dma_chan {
+       struct virt_dma_chan vc;
+       struct hisi_dma_dev *hdma_dev;
+       struct hisi_dma_sqe *sq;
+       struct hisi_dma_cqe *cq;
+       dma_addr_t sq_dma;
+       dma_addr_t cq_dma;
+       u32 sq_tail;
+       u32 cq_head;
+       u32 qp_num;
+       enum hisi_dma_chan_status status;
+       struct hisi_dma_desc *desc;
+};
+
+struct hisi_dma_dev {
+       struct pci_dev *pdev;
+       void __iomem *base;
+       struct dma_device dma_dev;
+       u32 chan_num;
+       u32 chan_depth;
+       struct hisi_dma_chan chan[];
+};
+
+static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct hisi_dma_chan, vc.chan);
+}
+
+static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
+{
+       return container_of(vd, struct hisi_dma_desc, vd);
+}
+
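+/* Per-channel register blocks repeat every HISI_DMA_OFFSET (0x100) bytes. */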
+static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
+                                      u32 val)
+{
+       writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET);
+}
+
+static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
+{
+       u32 tmp;
+
+       tmp = readl_relaxed(addr);
+       tmp = val ? tmp | BIT(pos) : tmp & ~BIT(pos);
+       writel_relaxed(tmp, addr);
+}
+
+static void hisi_dma_free_irq_vectors(void *data)
+{
+       pci_free_irq_vectors(data);
+}
+
+static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+                              bool pause)
+{
+       void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+                            HISI_DMA_OFFSET;
+
+       hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause);
+}
+
+static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+                               bool enable)
+{
+       void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+                            HISI_DMA_OFFSET;
+
+       hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable);
+}
+
+static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+       hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index,
+                           HISI_DMA_INT_STS_MASK);
+}
+
+static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+       void __iomem *base = hdma_dev->base;
+
+       hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index,
+                           HISI_DMA_INT_STS_MASK);
+       hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0);
+}
+
+static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+       void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index *
+                            HISI_DMA_OFFSET;
+
+       hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1);
+}
+
+static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+       hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+       hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
+{
+       struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+       u32 index = chan->qp_num, tmp;
+       int ret;
+
+       hisi_dma_pause_dma(hdma_dev, index, true);
+       hisi_dma_enable_dma(hdma_dev, index, false);
+       hisi_dma_mask_irq(hdma_dev, index);
+
+       ret = readl_relaxed_poll_timeout(hdma_dev->base +
+               HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+               FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000);
+       if (ret) {
+               dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
+               WARN_ON(1);
+       }
+
+       hisi_dma_do_reset(hdma_dev, index);
+       hisi_dma_reset_qp_point(hdma_dev, index);
+       hisi_dma_pause_dma(hdma_dev, index, false);
+       hisi_dma_enable_dma(hdma_dev, index, true);
+       hisi_dma_unmask_irq(hdma_dev, index);
+
+       ret = readl_relaxed_poll_timeout(hdma_dev->base +
+               HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+               FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000);
+       if (ret) {
+               dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
+               WARN_ON(1);
+       }
+}
+
+static void hisi_dma_free_chan_resources(struct dma_chan *c)
+{
+       struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+       struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+
+       hisi_dma_reset_hw_chan(chan);
+       vchan_free_chan_resources(&chan->vc);
+
+       memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
+       memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth);
+       chan->sq_tail = 0;
+       chan->cq_head = 0;
+       chan->status = DISABLE;
+}
+
+static void hisi_dma_desc_free(struct virt_dma_desc *vd)
+{
+       kfree(to_hisi_dma_desc(vd));
+}
+
+static struct dma_async_tx_descriptor *
+hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src,
+                        size_t len, unsigned long flags)
+{
+       struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+       struct hisi_dma_desc *desc;
+
+       desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+       if (!desc)
+               return NULL;
+
+       desc->sqe.length = cpu_to_le32(len);
+       desc->sqe.src_addr = cpu_to_le64(src);
+       desc->sqe.dst_addr = cpu_to_le64(dst);
+
+       return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+}
+
+static enum dma_status
+hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+                  struct dma_tx_state *txstate)
+{
+       return dma_cookie_status(c, cookie, txstate);
+}
+
+static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
+{
+       struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
+       struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+       struct hisi_dma_desc *desc;
+       struct virt_dma_desc *vd;
+
+       vd = vchan_next_desc(&chan->vc);
+       if (!vd) {
+               dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
+               chan->desc = NULL;
+               return;
+       }
+       list_del(&vd->node);
+       desc = to_hisi_dma_desc(vd);
+       chan->desc = desc;
+
+       memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));
+
+       /* update the remaining fields in the sqe */
+       sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
+       sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);
+
+       /* make sure data has been updated in sqe */
+       wmb();
+
+       /* update sq tail, point to new sqe position */
+       chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;
+
+       /* update sq_tail to trigger a new task */
+       hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num,
+                           chan->sq_tail);
+}
+
+static void hisi_dma_issue_pending(struct dma_chan *c)
+{
+       struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+
+       if (vchan_issue_pending(&chan->vc))
+               hisi_dma_start_transfer(chan);
+
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static int hisi_dma_terminate_all(struct dma_chan *c)
+{
+       struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+
+       hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true);
+       if (chan->desc) {
+               vchan_terminate_vdesc(&chan->desc->vd);
+               chan->desc = NULL;
+       }
+
+       vchan_get_all_descriptors(&chan->vc, &head);
+
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+       vchan_dma_desc_free_list(&chan->vc, &head);
+       hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false);
+
+       return 0;
+}
+
+static void hisi_dma_synchronize(struct dma_chan *c)
+{
+       struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+
+       vchan_synchronize(&chan->vc);
+}
+
+static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
+{
+       size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth;
+       size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;
+       struct device *dev = &hdma_dev->pdev->dev;
+       struct hisi_dma_chan *chan;
+       int i;
+
+       for (i = 0; i < hdma_dev->chan_num; i++) {
+               chan = &hdma_dev->chan[i];
+               chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma,
+                                              GFP_KERNEL);
+               if (!chan->sq)
+                       return -ENOMEM;
+
+               chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,
+                                              GFP_KERNEL);
+               if (!chan->cq)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+       struct hisi_dma_chan *chan = &hdma_dev->chan[index];
+       u32 hw_depth = hdma_dev->chan_depth - 1;
+       void __iomem *base = hdma_dev->base;
+
+       /* set sq, cq base */
+       hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index,
+                           lower_32_bits(chan->sq_dma));
+       hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index,
+                           upper_32_bits(chan->sq_dma));
+       hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index,
+                           lower_32_bits(chan->cq_dma));
+       hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index,
+                           upper_32_bits(chan->cq_dma));
+
+       /* set sq, cq depth */
+       hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, hw_depth);
+       hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth);
+
+       /* init sq tail and cq head */
+       hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+       hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+       hisi_dma_init_hw_qp(hdma_dev, qp_index);
+       hisi_dma_unmask_irq(hdma_dev, qp_index);
+       hisi_dma_enable_dma(hdma_dev, qp_index, true);
+}
+
+static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+       hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]);
+}
+
+static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
+{
+       int i;
+
+       for (i = 0; i < hdma_dev->chan_num; i++) {
+               hdma_dev->chan[i].qp_num = i;
+               hdma_dev->chan[i].hdma_dev = hdma_dev;
+               hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free;
+               vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev);
+               hisi_dma_enable_qp(hdma_dev, i);
+       }
+}
+
+static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
+{
+       int i;
+
+       for (i = 0; i < hdma_dev->chan_num; i++) {
+               hisi_dma_disable_qp(hdma_dev, i);
+               tasklet_kill(&hdma_dev->chan[i].vc.task);
+       }
+}
+
+static irqreturn_t hisi_dma_irq(int irq, void *data)
+{
+       struct hisi_dma_chan *chan = data;
+       struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+       struct hisi_dma_desc *desc;
+       struct hisi_dma_cqe *cqe;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+
+       desc = chan->desc;
+       cqe = chan->cq + chan->cq_head;
+       if (desc) {
+               if (FIELD_GET(STATUS_MASK, le16_to_cpu(cqe->w0)) == STATUS_SUCC) {
+                       chan->cq_head = (chan->cq_head + 1) %
+                                       hdma_dev->chan_depth;
+                       hisi_dma_chan_write(hdma_dev->base,
+                                           HISI_DMA_CQ_HEAD_PTR, chan->qp_num,
+                                           chan->cq_head);
+                       vchan_cookie_complete(&desc->vd);
+               } else {
+                       dev_err(&hdma_dev->pdev->dev, "task error!\n");
+               }
+
+               chan->desc = NULL;
+       }
+
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev)
+{
+       struct pci_dev *pdev = hdma_dev->pdev;
+       int i, ret;
+
+       for (i = 0; i < hdma_dev->chan_num; i++) {
+               ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+                                      hisi_dma_irq, IRQF_SHARED, "hisi_dma",
+                                      &hdma_dev->chan[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* This function enables all hw channels in a device */
+static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev)
+{
+       int ret;
+
+       ret = hisi_dma_alloc_qps_mem(hdma_dev);
+       if (ret) {
+               dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n");
+               return ret;
+       }
+
+       ret = hisi_dma_request_qps_irq(hdma_dev);
+       if (ret) {
+               dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n");
+               return ret;
+       }
+
+       hisi_dma_enable_qps(hdma_dev);
+
+       return 0;
+}
+
+static void hisi_dma_disable_hw_channels(void *data)
+{
+       hisi_dma_disable_qps(data);
+}
+
+static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
+                             enum hisi_dma_mode mode)
+{
+       writel_relaxed(mode == RC ? 1 : 0, hdma_dev->base + HISI_DMA_MODE);
+}
+
+static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct device *dev = &pdev->dev;
+       struct hisi_dma_dev *hdma_dev;
+       struct dma_device *dma_dev;
+       size_t dev_size;
+       int ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret) {
+               dev_err(dev, "failed to enable device mem!\n");
+               return ret;
+       }
+
+       ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev));
+       if (ret) {
+               dev_err(dev, "failed to remap I/O region!\n");
+               return ret;
+       }
+
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (ret)
+               return ret;
+
+       ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (ret)
+               return ret;
+
+       dev_size = sizeof(struct hisi_dma_chan) * HISI_DMA_CHAN_NUM +
+                  sizeof(*hdma_dev);
+       hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL);
+       if (!hdma_dev)
+               return -ENOMEM;
+
+       hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
+       hdma_dev->pdev = pdev;
+       hdma_dev->chan_num = HISI_DMA_CHAN_NUM;
+       hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;
+
+       pci_set_drvdata(pdev, hdma_dev);
+       pci_set_master(pdev);
+
+       ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
+                                   PCI_IRQ_MSI);
+       if (ret < 0) {
+               dev_err(dev, "Failed to allocate MSI vectors!\n");
+               return ret;
+       }
+
+       ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
+       if (ret)
+               return ret;
+
+       dma_dev = &hdma_dev->dma_dev;
+       dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+       dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
+       dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
+       dma_dev->device_tx_status = hisi_dma_tx_status;
+       dma_dev->device_issue_pending = hisi_dma_issue_pending;
+       dma_dev->device_terminate_all = hisi_dma_terminate_all;
+       dma_dev->device_synchronize = hisi_dma_synchronize;
+       dma_dev->directions = BIT(DMA_MEM_TO_MEM);
+       dma_dev->dev = dev;
+       INIT_LIST_HEAD(&dma_dev->channels);
+
+       hisi_dma_set_mode(hdma_dev, RC);
+
+       ret = hisi_dma_enable_hw_channels(hdma_dev);
+       if (ret < 0) {
+               dev_err(dev, "failed to enable hw channel!\n");
+               return ret;
+       }
+
+       ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels,
+                                      hdma_dev);
+       if (ret)
+               return ret;
+
+       ret = dmaenginem_async_device_register(dma_dev);
+       if (ret < 0)
+               dev_err(dev, "failed to register device!\n");
+
+       return ret;
+}
+
+static const struct pci_device_id hisi_dma_pci_tbl[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) },
+       { 0, }
+};
+
+static struct pci_driver hisi_dma_pci_driver = {
+       .name           = "hisi_dma",
+       .id_table       = hisi_dma_pci_tbl,
+       .probe          = hisi_dma_probe,
+};
+
+module_pci_driver(hisi_dma_pci_driver);
+
+MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
+MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
+MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);
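Since the driver only advertises DMA_MEMCPY, a client would drive it through the generic dmaengine memcpy path. A hedged sketch, assuming dst/src/len are already DMA-mapped by the caller (my_do_memcpy is illustrative):

/* Sketch: generic dmaengine client doing one memcpy via DMA_MEMCPY. */
static int my_do_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EIO;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -ETIMEDOUT;
out:
	dma_release_channel(chan);
	return ret;
}
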
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
new file mode 100644 (file)
index 0000000..8978b89
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IDXD) += idxd.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
new file mode 100644 (file)
index 0000000..1d73478
--- /dev/null
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/sched/task.h>
+#include <linux/intel-svm.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+struct idxd_cdev_context {
+       const char *name;
+       dev_t devt;
+       struct ida minor_ida;
+};
+
+/*
+ * ictx is an array indexed by accelerator type; enum idxd_type
+ * is used as the index.
+ */
+static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
+       { .name = "dsa" },
+};
+
+struct idxd_user_context {
+       struct idxd_wq *wq;
+       struct task_struct *task;
+       unsigned int flags;
+};
+
+enum idxd_cdev_cleanup {
+       CDEV_NORMAL = 0,
+       CDEV_FAILED,
+};
+
+static void idxd_cdev_dev_release(struct device *dev)
+{
+       dev_dbg(dev, "releasing cdev device\n");
+       kfree(dev);
+}
+
+static struct device_type idxd_cdev_device_type = {
+       .name = "idxd_cdev",
+       .release = idxd_cdev_dev_release,
+};
+
+static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
+{
+       struct cdev *cdev = inode->i_cdev;
+
+       return container_of(cdev, struct idxd_cdev, cdev);
+}
+
+static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
+{
+       return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
+}
+
+static inline struct idxd_wq *inode_wq(struct inode *inode)
+{
+       return idxd_cdev_wq(inode_idxd_cdev(inode));
+}
+
+static int idxd_cdev_open(struct inode *inode, struct file *filp)
+{
+       struct idxd_user_context *ctx;
+       struct idxd_device *idxd;
+       struct idxd_wq *wq;
+       struct device *dev;
+       struct idxd_cdev *idxd_cdev;
+
+       wq = inode_wq(inode);
+       idxd = wq->idxd;
+       dev = &idxd->pdev->dev;
+       idxd_cdev = &wq->idxd_cdev;
+
+       dev_dbg(dev, "%s called\n", __func__);
+
+       if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq))
+               return -EBUSY;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->wq = wq;
+       filp->private_data = ctx;
+       idxd_wq_get(wq);
+       return 0;
+}
+
+static int idxd_cdev_release(struct inode *node, struct file *filep)
+{
+       struct idxd_user_context *ctx = filep->private_data;
+       struct idxd_wq *wq = ctx->wq;
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+
+       dev_dbg(dev, "%s called\n", __func__);
+       filep->private_data = NULL;
+
+       kfree(ctx);
+       idxd_wq_put(wq);
+       return 0;
+}
+
+static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
+                    const char *func)
+{
+       struct device *dev = &wq->idxd->pdev->dev;
+
+       if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+               dev_info_ratelimited(dev,
+                                    "%s: %s: mapping too large: %lu\n",
+                                    current->comm, func,
+                                    vma->vm_end - vma->vm_start);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct idxd_user_context *ctx = filp->private_data;
+       struct idxd_wq *wq = ctx->wq;
+       struct idxd_device *idxd = wq->idxd;
+       struct pci_dev *pdev = idxd->pdev;
+       phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
+       unsigned long pfn;
+       int rc;
+
+       dev_dbg(&pdev->dev, "%s called\n", __func__);
+       rc = check_vma(wq, vma, __func__);
+       if (rc < 0)
+               return rc;
+
+       vma->vm_flags |= VM_DONTCOPY;
+       pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
+                               IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       vma->vm_private_data = ctx;
+
+       return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+                       vma->vm_page_prot);
+}
+
+static __poll_t idxd_cdev_poll(struct file *filp,
+                              struct poll_table_struct *wait)
+{
+       struct idxd_user_context *ctx = filp->private_data;
+       struct idxd_wq *wq = ctx->wq;
+       struct idxd_device *idxd = wq->idxd;
+       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+       unsigned long flags;
+       __poll_t out = 0;
+
+       poll_wait(filp, &idxd_cdev->err_queue, wait);
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       if (idxd->sw_err.valid)
+               out = EPOLLIN | EPOLLRDNORM;
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+       return out;
+}
+
+static const struct file_operations idxd_cdev_fops = {
+       .owner = THIS_MODULE,
+       .open = idxd_cdev_open,
+       .release = idxd_cdev_release,
+       .mmap = idxd_cdev_mmap,
+       .poll = idxd_cdev_poll,
+};
+
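These fops back one character device per work queue: open() takes a wq reference (exclusive for dedicated queues), mmap() exposes a single page of the limited portal, and poll() reports device errors. A rough userspace sketch; the /dev/dsa/wq0.0 path follows the dev_set_name() format used in idxd_wq_cdev_dev_setup() below, but the actual node depends on how udev creates it:

#include <fcntl.h>
#include <sys/mman.h>

/* Userspace sketch: map a DSA work-queue portal (path is illustrative). */
static void *map_wq_portal(void)
{
	void *portal;
	int fd = open("/dev/dsa/wq0.0", O_RDWR);

	if (fd < 0)
		return NULL;

	portal = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, 0);

	return portal == MAP_FAILED ? NULL : portal;
}
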
+int idxd_cdev_get_major(struct idxd_device *idxd)
+{
+       return MAJOR(ictx[idxd->type].devt);
+}
+
+static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+       struct idxd_cdev_context *cdev_ctx;
+       struct device *dev;
+       int minor, rc;
+
+       idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
+       if (!idxd_cdev->dev)
+               return -ENOMEM;
+
+       dev = idxd_cdev->dev;
+       dev->parent = &idxd->pdev->dev;
+       dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
+                    idxd->id, wq->id);
+       dev->bus = idxd_get_bus_type(idxd);
+
+       cdev_ctx = &ictx[wq->idxd->type];
+       minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+       if (minor < 0) {
+               rc = minor;
+               goto ida_err;
+       }
+
+       dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
+       dev->type = &idxd_cdev_device_type;
+       rc = device_register(dev);
+       if (rc < 0) {
+               dev_err(&idxd->pdev->dev, "device register failed\n");
+               goto dev_reg_err;
+       }
+       idxd_cdev->minor = minor;
+
+       return 0;
+
+ dev_reg_err:
+       ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+       put_device(dev);        /* ->release() frees the struct device */
+       idxd_cdev->dev = NULL;
+       return rc;
+
+ ida_err:
+       kfree(dev);
+       idxd_cdev->dev = NULL;
+       return rc;
+}
+
+static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
+                                enum idxd_cdev_cleanup cdev_state)
+{
+       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+       struct idxd_cdev_context *cdev_ctx;
+
+       cdev_ctx = &ictx[wq->idxd->type];
+       if (cdev_state == CDEV_NORMAL)
+               cdev_del(&idxd_cdev->cdev);
+       device_unregister(idxd_cdev->dev);
+       /*
+        * The device_type->release() callback will be invoked on the device
+        * and will free the allocated struct device, so we can simply drop
+        * our pointer to it.
+        */
+       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       idxd_cdev->dev = NULL;
+       idxd_cdev->minor = -1;
+}
+
+int idxd_wq_add_cdev(struct idxd_wq *wq)
+{
+       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+       struct cdev *cdev = &idxd_cdev->cdev;
+       struct device *dev;
+       int rc;
+
+       rc = idxd_wq_cdev_dev_setup(wq);
+       if (rc < 0)
+               return rc;
+
+       dev = idxd_cdev->dev;
+       cdev_init(cdev, &idxd_cdev_fops);
+       cdev_set_parent(cdev, &dev->kobj);
+       rc = cdev_add(cdev, dev->devt, 1);
+       if (rc) {
+               dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
+               idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
+               return rc;
+       }
+
+       init_waitqueue_head(&idxd_cdev->err_queue);
+       return 0;
+}
+
+void idxd_wq_del_cdev(struct idxd_wq *wq)
+{
+       idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
+}
+
+int idxd_cdev_register(void)
+{
+       int rc, i;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++) {
+               ida_init(&ictx[i].minor_ida);
+               rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
+                                        ictx[i].name);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
+void idxd_cdev_remove(void)
+{
+       int i;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++) {
+               unregister_chrdev_region(ictx[i].devt, MINORMASK);
+               ida_destroy(&ictx[i].minor_ida);
+       }
+}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
new file mode 100644 (file)
index 0000000..ada69e7
--- /dev/null
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);
+
+/* Interrupt control bits */
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       int msixcnt = pci_msix_vec_count(pdev);
+       union msix_perm perm;
+       u32 offset;
+
+       if (vec_id < 0 || vec_id >= msixcnt)
+               return -EINVAL;
+
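+       /* the MSI-X permission table has one 8-byte entry per vector */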
+       offset = idxd->msix_perm_offset + vec_id * 8;
+       perm.bits = ioread32(idxd->reg_base + offset);
+       perm.ignore = 1;
+       iowrite32(perm.bits, idxd->reg_base + offset);
+
+       return 0;
+}
+
+void idxd_mask_msix_vectors(struct idxd_device *idxd)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       int msixcnt = pci_msix_vec_count(pdev);
+       int i, rc;
+
+       for (i = 0; i < msixcnt; i++) {
+               rc = idxd_mask_msix_vector(idxd, i);
+               if (rc < 0)
+                       dev_warn(&pdev->dev,
+                                "Failed disabling msix vec %d\n", i);
+       }
+}
+
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       int msixcnt = pci_msix_vec_count(pdev);
+       union msix_perm perm;
+       u32 offset;
+
+       if (vec_id < 0 || vec_id >= msixcnt)
+               return -EINVAL;
+
+       offset = idxd->msix_perm_offset + vec_id * 8;
+       perm.bits = ioread32(idxd->reg_base + offset);
+       perm.ignore = 0;
+       iowrite32(perm.bits, idxd->reg_base + offset);
+
+       return 0;
+}
+
+void idxd_unmask_error_interrupts(struct idxd_device *idxd)
+{
+       union genctrl_reg genctrl;
+
+       genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+       genctrl.softerr_int_en = 1;
+       iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+void idxd_mask_error_interrupts(struct idxd_device *idxd)
+{
+       union genctrl_reg genctrl;
+
+       genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+       genctrl.softerr_int_en = 0;
+       iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+static void free_hw_descs(struct idxd_wq *wq)
+{
+       int i;
+
+       for (i = 0; i < wq->num_descs; i++)
+               kfree(wq->hw_descs[i]);
+
+       kfree(wq->hw_descs);
+}
+
+static int alloc_hw_descs(struct idxd_wq *wq, int num)
+{
+       struct device *dev = &wq->idxd->pdev->dev;
+       int i;
+       int node = dev_to_node(dev);
+
+       wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
+                                   GFP_KERNEL, node);
+       if (!wq->hw_descs)
+               return -ENOMEM;
+
+       for (i = 0; i < num; i++) {
+               wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
+                                              GFP_KERNEL, node);
+               if (!wq->hw_descs[i]) {
+                       free_hw_descs(wq);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+static void free_descs(struct idxd_wq *wq)
+{
+       int i;
+
+       for (i = 0; i < wq->num_descs; i++)
+               kfree(wq->descs[i]);
+
+       kfree(wq->descs);
+}
+
+static int alloc_descs(struct idxd_wq *wq, int num)
+{
+       struct device *dev = &wq->idxd->pdev->dev;
+       int i;
+       int node = dev_to_node(dev);
+
+       wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
+                                GFP_KERNEL, node);
+       if (!wq->descs)
+               return -ENOMEM;
+
+       for (i = 0; i < num; i++) {
+               wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
+                                           GFP_KERNEL, node);
+               if (!wq->descs[i]) {
+                       free_descs(wq);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+/* WQ control bits */
+int idxd_wq_alloc_resources(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct idxd_group *group = wq->group;
+       struct device *dev = &idxd->pdev->dev;
+       int rc, num_descs, i;
+
+       if (wq->type != IDXD_WQT_KERNEL)
+               return 0;
+
+       num_descs = wq->size +
+               idxd->hw.gen_cap.max_descs_per_engine * group->num_engines;
+       wq->num_descs = num_descs;
+
+       rc = alloc_hw_descs(wq, num_descs);
+       if (rc < 0)
+               return rc;
+
+       wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
+       wq->compls = dma_alloc_coherent(dev, wq->compls_size,
+                                       &wq->compls_addr, GFP_KERNEL);
+       if (!wq->compls) {
+               rc = -ENOMEM;
+               goto fail_alloc_compls;
+       }
+
+       rc = alloc_descs(wq, num_descs);
+       if (rc < 0)
+               goto fail_alloc_descs;
+
+       rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
+                              dev_to_node(dev));
+       if (rc < 0)
+               goto fail_sbitmap_init;
+
+       for (i = 0; i < num_descs; i++) {
+               struct idxd_desc *desc = wq->descs[i];
+
+               desc->hw = wq->hw_descs[i];
+               desc->completion = &wq->compls[i];
+               desc->compl_dma  = wq->compls_addr +
+                       sizeof(struct dsa_completion_record) * i;
+               desc->id = i;
+               desc->wq = wq;
+
+               dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
+               desc->txd.tx_submit = idxd_dma_tx_submit;
+       }
+
+       return 0;
+
+ fail_sbitmap_init:
+       free_descs(wq);
+ fail_alloc_descs:
+       dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ fail_alloc_compls:
+       free_hw_descs(wq);
+       return rc;
+}
+
+void idxd_wq_free_resources(struct idxd_wq *wq)
+{
+       struct device *dev = &wq->idxd->pdev->dev;
+
+       if (wq->type != IDXD_WQT_KERNEL)
+               return;
+
+       free_hw_descs(wq);
+       free_descs(wq);
+       dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+       sbitmap_free(&wq->sbmap);
+}
+
+int idxd_wq_enable(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       u32 status;
+       int rc;
+
+       lockdep_assert_held(&idxd->dev_lock);
+
+       if (wq->state == IDXD_WQ_ENABLED) {
+               dev_dbg(dev, "WQ %d already enabled\n", wq->id);
+               return -ENXIO;
+       }
+
+       rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
+       if (rc < 0)
+               return rc;
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       if (status != IDXD_CMDSTS_SUCCESS &&
+           status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
+               dev_dbg(dev, "WQ enable failed: %#x\n", status);
+               return -ENXIO;
+       }
+
+       wq->state = IDXD_WQ_ENABLED;
+       dev_dbg(dev, "WQ %d enabled\n", wq->id);
+       return 0;
+}
+
+int idxd_wq_disable(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       u32 status, operand;
+       int rc;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       dev_dbg(dev, "Disabling WQ %d\n", wq->id);
+
+       if (wq->state != IDXD_WQ_ENABLED) {
+               dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+               return 0;
+       }
+
+       operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+       rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
+       if (rc < 0)
+               return rc;
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       if (status != IDXD_CMDSTS_SUCCESS) {
+               dev_dbg(dev, "WQ disable failed: %#x\n", status);
+               return -ENXIO;
+       }
+
+       wq->state = IDXD_WQ_DISABLED;
+       dev_dbg(dev, "WQ %d disabled\n", wq->id);
+       return 0;
+}
+
+int idxd_wq_map_portal(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct pci_dev *pdev = idxd->pdev;
+       struct device *dev = &pdev->dev;
+       resource_size_t start;
+
+       start = pci_resource_start(pdev, IDXD_WQ_BAR);
+       start = start + wq->id * IDXD_PORTAL_SIZE;
+
+       wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
+       if (!wq->dportal)
+               return -ENOMEM;
+       dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);
+
+       return 0;
+}
+
+void idxd_wq_unmap_portal(struct idxd_wq *wq)
+{
+       struct device *dev = &wq->idxd->pdev->dev;
+
+       devm_iounmap(dev, wq->dportal);
+}
+
+/* Device control bits */
+static inline bool idxd_is_enabled(struct idxd_device *idxd)
+{
+       union gensts_reg gensts;
+
+       gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+       return gensts.state == IDXD_DEVICE_STATE_ENABLED;
+}
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
+{
+       u32 sts, to = timeout;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+       while (sts & IDXD_CMDSTS_ACTIVE && --to) {
+               cpu_relax();
+               sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+       }
+
+       if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
+               dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
+               *status = 0;
+               return -EBUSY;
+       }
+
+       *status = sts;
+       return 0;
+}
+
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
+{
+       union idxd_command_reg cmd;
+       int rc;
+       u32 status;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.cmd = cmd_code;
+       cmd.operand = operand;
+       dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
+               __func__, cmd_code, operand);
+       iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
+
+       return 0;
+}
+
+int idxd_device_enable(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int rc;
+       u32 status;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       if (idxd_is_enabled(idxd)) {
+               dev_dbg(dev, "Device already enabled\n");
+               return -ENXIO;
+       }
+
+       rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
+       if (rc < 0)
+               return rc;
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       /* If the command is successful or if the device was enabled */
+       if (status != IDXD_CMDSTS_SUCCESS &&
+           status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
+               dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+               return -ENXIO;
+       }
+
+       idxd->state = IDXD_DEV_ENABLED;
+       return 0;
+}
+
+int idxd_device_disable(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int rc;
+       u32 status;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       if (!idxd_is_enabled(idxd)) {
+               dev_dbg(dev, "Device is not enabled\n");
+               return 0;
+       }
+
+       rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
+       if (rc < 0)
+               return rc;
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       /* If the command is successful or if the device was disabled */
+       if (status != IDXD_CMDSTS_SUCCESS &&
+           !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
+               dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+               return -ENXIO;
+       }
+
+       idxd->state = IDXD_DEV_CONF_READY;
+       return 0;
+}
+
+int __idxd_device_reset(struct idxd_device *idxd)
+{
+       u32 status;
+       int rc;
+
+       rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
+       if (rc < 0)
+               return rc;
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       return 0;
+}
+
+int idxd_device_reset(struct idxd_device *idxd)
+{
+       unsigned long flags;
+       int rc;
+
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       rc = __idxd_device_reset(idxd);
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+       return rc;
+}
+
+/* Device configuration bits */
+static void idxd_group_config_write(struct idxd_group *group)
+{
+       struct idxd_device *idxd = group->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       int i;
+       u32 grpcfg_offset;
+
+       dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
+
+       /* setup GRPWQCFG */
+       for (i = 0; i < 4; i++) {
+               grpcfg_offset = idxd->grpcfg_offset +
+                       group->id * 64 + i * sizeof(u64);
+               iowrite64(group->grpcfg.wqs[i],
+                         idxd->reg_base + grpcfg_offset);
+               dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
+                       group->id, i, grpcfg_offset,
+                       ioread64(idxd->reg_base + grpcfg_offset));
+       }
+
+       /* setup GRPENGCFG */
+       grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
+       iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
+       dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
+               grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
+
+       /* setup GRPFLAGS */
+       grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
+       iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
+       dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+               group->id, grpcfg_offset,
+               ioread32(idxd->reg_base + grpcfg_offset));
+}
+
+static int idxd_groups_config_write(struct idxd_device *idxd)
+{
+       union gencfg_reg reg;
+       int i;
+       struct device *dev = &idxd->pdev->dev;
+
+       /* Setup bandwidth token limit */
+       if (idxd->token_limit) {
+               reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+               reg.token_limit = idxd->token_limit;
+               iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+       }
+
+       dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
+               ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *group = &idxd->groups[i];
+
+               idxd_group_config_write(group);
+       }
+
+       return 0;
+}
+
+static int idxd_wq_config_write(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       u32 wq_offset;
+       int i;
+
+       if (!wq->group)
+               return 0;
+
+       memset(&wq->wqcfg, 0, sizeof(union wqcfg));
+
+       /* byte 0-3 */
+       wq->wqcfg.wq_size = wq->size;
+
+       if (wq->size == 0) {
+               dev_warn(dev, "Incorrect work queue size: 0\n");
+               return -EINVAL;
+       }
+
+       /* bytes 4-7 */
+       wq->wqcfg.wq_thresh = wq->threshold;
+
+       /* byte 8-11 */
+       wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
+       wq->wqcfg.mode = 1;
+
+       wq->wqcfg.priority = wq->priority;
+
+       /* bytes 12-15 */
+       wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
+       wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+
+       dev_dbg(dev, "WQ %d CFGs\n", wq->id);
+       for (i = 0; i < 8; i++) {
+               wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+               iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
+               dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+                       wq->id, i, wq_offset,
+                       ioread32(idxd->reg_base + wq_offset));
+       }
+
+       return 0;
+}
+
+static int idxd_wqs_config_write(struct idxd_device *idxd)
+{
+       int i, rc;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               rc = idxd_wq_config_write(wq);
+               if (rc < 0)
+                       return rc;
+       }
+
+       return 0;
+}
+
+static void idxd_group_flags_setup(struct idxd_device *idxd)
+{
+       int i;
+
+       /* TC-A 0 and TC-B 1 should be defaults */
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *group = &idxd->groups[i];
+
+               if (group->tc_a == -1)
+                       group->grpcfg.flags.tc_a = 0;
+               else
+                       group->grpcfg.flags.tc_a = group->tc_a;
+               if (group->tc_b == -1)
+                       group->grpcfg.flags.tc_b = 1;
+               else
+                       group->grpcfg.flags.tc_b = group->tc_b;
+               group->grpcfg.flags.use_token_limit = group->use_token_limit;
+               group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
+               if (group->tokens_allowed)
+                       group->grpcfg.flags.tokens_allowed =
+                               group->tokens_allowed;
+               else
+                       group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
+       }
+}
+
+static int idxd_engines_setup(struct idxd_device *idxd)
+{
+       int i, engines = 0;
+       struct idxd_engine *eng;
+       struct idxd_group *group;
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               group = &idxd->groups[i];
+               group->grpcfg.engines = 0;
+       }
+
+       for (i = 0; i < idxd->max_engines; i++) {
+               eng = &idxd->engines[i];
+               group = eng->group;
+
+               if (!group)
+                       continue;
+
+               group->grpcfg.engines |= BIT(eng->id);
+               engines++;
+       }
+
+       if (!engines)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int idxd_wqs_setup(struct idxd_device *idxd)
+{
+       struct idxd_wq *wq;
+       struct idxd_group *group;
+       int i, j, configured = 0;
+       struct device *dev = &idxd->pdev->dev;
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               group = &idxd->groups[i];
+               for (j = 0; j < 4; j++)
+                       group->grpcfg.wqs[j] = 0;
+       }
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               wq = &idxd->wqs[i];
+               group = wq->group;
+
+               if (!wq->group)
+                       continue;
+               if (!wq->size)
+                       continue;
+
+               if (!wq_dedicated(wq)) {
+                       dev_warn(dev, "No shared workqueue support.\n");
+                       return -EINVAL;
+               }
+
+               group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
+               configured++;
+       }
+
+       if (configured == 0)
+               return -EINVAL;
+
+       return 0;
+}
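+
+/*
+ * Illustrative sketch (not part of the original patch): the GRPCFG WQ
+ * field is an array of four u64 bitmaps, so the indexing above places
+ * WQ id 70 in wqs[70 / 64] = wqs[1], at bit 70 % 64 = 6.
+ */
+static inline void example_grpcfg_set_wq(struct idxd_group *group, int wq_id)
+{
+       /* same math as idxd_wqs_setup(): word index, then bit within it */
+       group->grpcfg.wqs[wq_id / 64] |= BIT(wq_id % 64);
+}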
+
+int idxd_device_config(struct idxd_device *idxd)
+{
+       int rc;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       rc = idxd_wqs_setup(idxd);
+       if (rc < 0)
+               return rc;
+
+       rc = idxd_engines_setup(idxd);
+       if (rc < 0)
+               return rc;
+
+       idxd_group_flags_setup(idxd);
+
+       rc = idxd_wqs_config_write(idxd);
+       if (rc < 0)
+               return rc;
+
+       rc = idxd_groups_config_write(idxd);
+       if (rc < 0)
+               return rc;
+
+       return 0;
+}
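+
+/*
+ * Hypothetical caller sketch (name assumed for illustration):
+ * idxd_device_config() asserts dev_lock is held, so a configuration
+ * path would wrap it like this.
+ */
+static inline int example_configure_device(struct idxd_device *idxd)
+{
+       unsigned long flags;
+       int rc;
+
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       rc = idxd_device_config(idxd);
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+       return rc;
+}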
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
new file mode 100644 (file)
index 0000000..c64c142
--- /dev/null
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
+{
+       return container_of(c, struct idxd_wq, dma_chan);
+}
+
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+                          enum idxd_complete_type comp_type)
+{
+       struct dma_async_tx_descriptor *tx;
+       struct dmaengine_result res;
+       int complete = 1;
+
+       if (desc->completion->status == DSA_COMP_SUCCESS)
+               res.result = DMA_TRANS_NOERROR;
+       else if (desc->completion->status)
+               res.result = DMA_TRANS_WRITE_FAILED;
+       else if (comp_type == IDXD_COMPLETE_ABORT)
+               res.result = DMA_TRANS_ABORTED;
+       else
+               complete = 0;
+
+       tx = &desc->txd;
+       if (complete && tx->cookie) {
+               dma_cookie_complete(tx);
+               dma_descriptor_unmap(tx);
+               dmaengine_desc_get_callback_invoke(tx, &res);
+               tx->callback = NULL;
+               tx->callback_result = NULL;
+       }
+}
+
+static void op_flag_setup(unsigned long flags, u32 *desc_flags)
+{
+       *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
+       if (flags & DMA_PREP_INTERRUPT)
+               *desc_flags |= IDXD_OP_FLAG_RCI;
+}
+
+static inline void set_completion_address(struct idxd_desc *desc,
+                                         u64 *compl_addr)
+{
+       *compl_addr = desc->compl_dma;
+}
+
+static inline void idxd_prep_desc_common(struct idxd_wq *wq,
+                                        struct dsa_hw_desc *hw, char opcode,
+                                        u64 addr_f1, u64 addr_f2, u64 len,
+                                        u64 compl, u32 flags)
+{
+       struct idxd_device *idxd = wq->idxd;
+
+       hw->flags = flags;
+       hw->opcode = opcode;
+       hw->src_addr = addr_f1;
+       hw->dst_addr = addr_f2;
+       hw->xfer_size = len;
+       hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
+       hw->completion_addr = compl;
+
+       /*
+        * Descriptor completion vectors are 1-8 for MSI-X. Round-robin
+        * through them so completion interrupts are spread evenly across
+        * the available vectors.
+        */
+       wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+       hw->int_handle = wq->vec_ptr;
+}
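+
+/*
+ * Sketch of the vector rotation above (illustration only): with
+ * num_wq_irqs = 8 the sequence is 1, 2, ..., 8, 1, ... and can never
+ * yield 0, the vector reserved for error/misc handling.
+ */
+static inline u32 example_next_vector(u32 cur, int num_wq_irqs)
+{
+       return (cur % num_wq_irqs) + 1;
+}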
+
+static struct dma_async_tx_descriptor *
+idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+                      dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+       struct idxd_wq *wq = to_idxd_wq(c);
+       u32 desc_flags;
+       struct idxd_device *idxd = wq->idxd;
+       struct idxd_desc *desc;
+
+       if (wq->state != IDXD_WQ_ENABLED)
+               return NULL;
+
+       if (len > idxd->max_xfer_bytes)
+               return NULL;
+
+       op_flag_setup(flags, &desc_flags);
+       desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+       if (IS_ERR(desc))
+               return NULL;
+
+       idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
+                             dma_src, dma_dest, len, desc->compl_dma,
+                             desc_flags);
+
+       desc->txd.flags = flags;
+
+       return &desc->txd;
+}
+
+static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct idxd_wq *wq = to_idxd_wq(chan);
+       struct device *dev = &wq->idxd->pdev->dev;
+
+       idxd_wq_get(wq);
+       dev_dbg(dev, "%s: client_count: %d\n", __func__,
+               idxd_wq_refcount(wq));
+       return 0;
+}
+
+static void idxd_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct idxd_wq *wq = to_idxd_wq(chan);
+       struct device *dev = &wq->idxd->pdev->dev;
+
+       idxd_wq_put(wq);
+       dev_dbg(dev, "%s: client_count: %d\n", __func__,
+               idxd_wq_refcount(wq));
+}
+
+static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
+                                         dma_cookie_t cookie,
+                                         struct dma_tx_state *txstate)
+{
+       return dma_cookie_status(dma_chan, cookie, txstate);
+}
+
+/*
+ * issue_pending() does not need to do anything since tx_submit() does the job
+ * already.
+ */
+static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
+{
+}
+
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct dma_chan *c = tx->chan;
+       struct idxd_wq *wq = to_idxd_wq(c);
+       dma_cookie_t cookie;
+       int rc;
+       struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);
+
+       cookie = dma_cookie_assign(tx);
+
+       rc = idxd_submit_desc(wq, desc);
+       if (rc < 0) {
+               idxd_free_desc(wq, desc);
+               return rc;
+       }
+
+       return cookie;
+}
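+
+/*
+ * Hypothetical dmaengine client flow against this driver (assumed
+ * pre-mapped DMA addresses; illustration only). issue_pending() is a
+ * no-op here because tx_submit() already pushed the descriptor.
+ */
+static inline int example_client_memcpy(struct dma_chan *chan,
+                                       dma_addr_t dst, dma_addr_t src,
+                                       size_t len)
+{
+       struct dma_async_tx_descriptor *tx;
+       dma_cookie_t cookie;
+
+       tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
+                                      DMA_PREP_INTERRUPT);
+       if (!tx)
+               return -ENOMEM;
+
+       cookie = dmaengine_submit(tx);  /* ends up in idxd_dma_tx_submit() */
+       dma_async_issue_pending(chan);  /* no-op for idxd, kept for the API */
+       return dma_submit_error(cookie) ? -EIO : 0;
+}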
+
+static void idxd_dma_release(struct dma_device *device)
+{
+}
+
+int idxd_register_dma_device(struct idxd_device *idxd)
+{
+       struct dma_device *dma = &idxd->dma_dev;
+
+       INIT_LIST_HEAD(&dma->channels);
+       dma->dev = &idxd->pdev->dev;
+
+       dma->device_release = idxd_dma_release;
+
+       if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
+               dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+               dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
+       }
+
+       dma->device_tx_status = idxd_dma_tx_status;
+       dma->device_issue_pending = idxd_dma_issue_pending;
+       dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
+       dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+
+       return dma_async_device_register(&idxd->dma_dev);
+}
+
+void idxd_unregister_dma_device(struct idxd_device *idxd)
+{
+       dma_async_device_unregister(&idxd->dma_dev);
+}
+
+int idxd_register_dma_channel(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct dma_device *dma = &idxd->dma_dev;
+       struct dma_chan *chan = &wq->dma_chan;
+       int rc;
+
+       memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
+       chan->device = dma;
+       list_add_tail(&chan->device_node, &dma->channels);
+       rc = dma_async_device_channel_register(dma, chan);
+       if (rc < 0)
+               return rc;
+
+       return 0;
+}
+
+void idxd_unregister_dma_channel(struct idxd_wq *wq)
+{
+       dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
new file mode 100644 (file)
index 0000000..b8f8a36
--- /dev/null
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_H_
+#define _IDXD_H_
+
+#include <linux/sbitmap.h>
+#include <linux/dmaengine.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include "registers.h"
+
+#define IDXD_DRIVER_VERSION    "1.00"
+
+extern struct kmem_cache *idxd_desc_pool;
+
+#define IDXD_REG_TIMEOUT       50
+#define IDXD_DRAIN_TIMEOUT     5000
+
+enum idxd_type {
+       IDXD_TYPE_UNKNOWN = -1,
+       IDXD_TYPE_DSA = 0,
+       IDXD_TYPE_MAX
+};
+
+#define IDXD_NAME_SIZE         128
+
+struct idxd_device_driver {
+       struct device_driver drv;
+};
+
+struct idxd_irq_entry {
+       struct idxd_device *idxd;
+       int id;
+       struct llist_head pending_llist;
+       struct list_head work_list;
+};
+
+struct idxd_group {
+       struct device conf_dev;
+       struct idxd_device *idxd;
+       struct grpcfg grpcfg;
+       int id;
+       int num_engines;
+       int num_wqs;
+       bool use_token_limit;
+       u8 tokens_allowed;
+       u8 tokens_reserved;
+       int tc_a;
+       int tc_b;
+};
+
+#define IDXD_MAX_PRIORITY      0xf
+
+enum idxd_wq_state {
+       IDXD_WQ_DISABLED = 0,
+       IDXD_WQ_ENABLED,
+};
+
+enum idxd_wq_flag {
+       WQ_FLAG_DEDICATED = 0,
+};
+
+enum idxd_wq_type {
+       IDXD_WQT_NONE = 0,
+       IDXD_WQT_KERNEL,
+       IDXD_WQT_USER,
+};
+
+struct idxd_cdev {
+       struct cdev cdev;
+       struct device *dev;
+       int minor;
+       struct wait_queue_head err_queue;
+};
+
+#define IDXD_ALLOCATED_BATCH_SIZE      128U
+#define WQ_NAME_SIZE   1024
+#define WQ_TYPE_SIZE   10
+
+enum idxd_op_type {
+       IDXD_OP_BLOCK = 0,
+       IDXD_OP_NONBLOCK = 1,
+};
+
+enum idxd_complete_type {
+       IDXD_COMPLETE_NORMAL = 0,
+       IDXD_COMPLETE_ABORT,
+};
+
+struct idxd_wq {
+       void __iomem *dportal;
+       struct device conf_dev;
+       struct idxd_cdev idxd_cdev;
+       struct idxd_device *idxd;
+       int id;
+       enum idxd_wq_type type;
+       struct idxd_group *group;
+       int client_count;
+       struct mutex wq_lock;   /* mutex for workqueue */
+       u32 size;
+       u32 threshold;
+       u32 priority;
+       enum idxd_wq_state state;
+       unsigned long flags;
+       union wqcfg wqcfg;
+       atomic_t dq_count;      /* dedicated queue flow control */
+       u32 vec_ptr;            /* interrupt steering */
+       struct dsa_hw_desc **hw_descs;
+       int num_descs;
+       struct dsa_completion_record *compls;
+       dma_addr_t compls_addr;
+       int compls_size;
+       struct idxd_desc **descs;
+       struct sbitmap sbmap;
+       struct dma_chan dma_chan;
+       struct percpu_rw_semaphore submit_lock;
+       wait_queue_head_t submit_waitq;
+       char name[WQ_NAME_SIZE + 1];
+};
+
+struct idxd_engine {
+       struct device conf_dev;
+       int id;
+       struct idxd_group *group;
+       struct idxd_device *idxd;
+};
+
+/* shadow registers */
+struct idxd_hw {
+       u32 version;
+       union gen_cap_reg gen_cap;
+       union wq_cap_reg wq_cap;
+       union group_cap_reg group_cap;
+       union engine_cap_reg engine_cap;
+       struct opcap opcap;
+};
+
+enum idxd_device_state {
+       IDXD_DEV_HALTED = -1,
+       IDXD_DEV_DISABLED = 0,
+       IDXD_DEV_CONF_READY,
+       IDXD_DEV_ENABLED,
+};
+
+enum idxd_device_flag {
+       IDXD_FLAG_CONFIGURABLE = 0,
+};
+
+struct idxd_device {
+       enum idxd_type type;
+       struct device conf_dev;
+       struct list_head list;
+       struct idxd_hw hw;
+       enum idxd_device_state state;
+       unsigned long flags;
+       int id;
+       int major;
+
+       struct pci_dev *pdev;
+       void __iomem *reg_base;
+
+       spinlock_t dev_lock;    /* spinlock for device */
+       struct idxd_group *groups;
+       struct idxd_wq *wqs;
+       struct idxd_engine *engines;
+
+       int num_groups;
+
+       u32 msix_perm_offset;
+       u32 wqcfg_offset;
+       u32 grpcfg_offset;
+       u32 perfmon_offset;
+
+       u64 max_xfer_bytes;
+       u32 max_batch_size;
+       int max_groups;
+       int max_engines;
+       int max_tokens;
+       int max_wqs;
+       int max_wq_size;
+       int token_limit;
+       int nr_tokens;          /* non-reserved tokens */
+
+       union sw_err_reg sw_err;
+
+       struct msix_entry *msix_entries;
+       int num_wq_irqs;
+       struct idxd_irq_entry *irq_entries;
+
+       struct dma_device dma_dev;
+};
+
+/* IDXD software descriptor */
+struct idxd_desc {
+       struct dsa_hw_desc *hw;
+       dma_addr_t desc_dma;
+       struct dsa_completion_record *completion;
+       dma_addr_t compl_dma;
+       struct dma_async_tx_descriptor txd;
+       struct llist_node llnode;
+       struct list_head list;
+       int id;
+       struct idxd_wq *wq;
+};
+
+#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
+#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
+
+extern struct bus_type dsa_bus_type;
+
+static inline bool wq_dedicated(struct idxd_wq *wq)
+{
+       return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
+}
+
+enum idxd_portal_prot {
+       IDXD_PORTAL_UNLIMITED = 0,
+       IDXD_PORTAL_LIMITED,
+};
+
+static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
+{
+       return prot * 0x1000;
+}
+
+static inline int idxd_get_wq_portal_full_offset(int wq_id,
+                                                enum idxd_portal_prot prot)
+{
+       return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
+}
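+
+/*
+ * Worked example of the portal math (illustration only): each WQ owns
+ * four portal pages, so with 4KB pages WQ 2's limited portal sits at
+ * ((2 * 4) << 12) + 0x1000 = 0x9000 from the start of the WQ BAR.
+ */
+static inline int example_portal_offset(void)
+{
+       return idxd_get_wq_portal_full_offset(2, IDXD_PORTAL_LIMITED);
+}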
+
+static inline void idxd_set_type(struct idxd_device *idxd)
+{
+       struct pci_dev *pdev = idxd->pdev;
+
+       if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
+               idxd->type = IDXD_TYPE_DSA;
+       else
+               idxd->type = IDXD_TYPE_UNKNOWN;
+}
+
+static inline void idxd_wq_get(struct idxd_wq *wq)
+{
+       wq->client_count++;
+}
+
+static inline void idxd_wq_put(struct idxd_wq *wq)
+{
+       wq->client_count--;
+}
+
+static inline int idxd_wq_refcount(struct idxd_wq *wq)
+{
+       return wq->client_count;
+}
+
+const char *idxd_get_dev_name(struct idxd_device *idxd);
+int idxd_register_bus_type(void);
+void idxd_unregister_bus_type(void);
+int idxd_setup_sysfs(struct idxd_device *idxd);
+void idxd_cleanup_sysfs(struct idxd_device *idxd);
+int idxd_register_driver(void);
+void idxd_unregister_driver(void);
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
+
+/* device interrupt control */
+irqreturn_t idxd_irq_handler(int vec, void *data);
+irqreturn_t idxd_misc_thread(int vec, void *data);
+irqreturn_t idxd_wq_thread(int irq, void *data);
+void idxd_mask_error_interrupts(struct idxd_device *idxd);
+void idxd_unmask_error_interrupts(struct idxd_device *idxd);
+void idxd_mask_msix_vectors(struct idxd_device *idxd);
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
+
+/* device control */
+int idxd_device_enable(struct idxd_device *idxd);
+int idxd_device_disable(struct idxd_device *idxd);
+int idxd_device_reset(struct idxd_device *idxd);
+int __idxd_device_reset(struct idxd_device *idxd);
+void idxd_device_cleanup(struct idxd_device *idxd);
+int idxd_device_config(struct idxd_device *idxd);
+void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+
+/* work queue control */
+int idxd_wq_alloc_resources(struct idxd_wq *wq);
+void idxd_wq_free_resources(struct idxd_wq *wq);
+int idxd_wq_enable(struct idxd_wq *wq);
+int idxd_wq_disable(struct idxd_wq *wq);
+int idxd_wq_map_portal(struct idxd_wq *wq);
+void idxd_wq_unmap_portal(struct idxd_wq *wq);
+
+/* submission */
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+
+/* dmaengine */
+int idxd_register_dma_device(struct idxd_device *idxd);
+void idxd_unregister_dma_device(struct idxd_device *idxd);
+int idxd_register_dma_channel(struct idxd_wq *wq);
+void idxd_unregister_dma_channel(struct idxd_wq *wq);
+void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+                          enum idxd_complete_type comp_type);
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+
+/* cdev */
+int idxd_cdev_register(void);
+void idxd_cdev_remove(void);
+int idxd_cdev_get_major(struct idxd_device *idxd);
+int idxd_wq_add_cdev(struct idxd_wq *wq);
+void idxd_wq_del_cdev(struct idxd_wq *wq);
+
+#endif
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
new file mode 100644 (file)
index 0000000..7778c05
--- /dev/null
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/aer.h>
+#include <linux/fs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <uapi/linux/idxd.h>
+#include <linux/dmaengine.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+MODULE_VERSION(IDXD_DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+
+#define DRV_NAME "idxd"
+
+static struct idr idxd_idrs[IDXD_TYPE_MAX];
+static struct mutex idxd_idr_lock;
+
+static struct pci_device_id idxd_pci_tbl[] = {
+       /* DSA ver 1.0 platforms */
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
+
+static char *idxd_name[] = {
+       "dsa",
+};
+
+const char *idxd_get_dev_name(struct idxd_device *idxd)
+{
+       return idxd_name[idxd->type];
+}
+
+static int idxd_setup_interrupts(struct idxd_device *idxd)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       struct device *dev = &pdev->dev;
+       struct msix_entry *msix;
+       struct idxd_irq_entry *irq_entry;
+       int i, msixcnt;
+       int rc = 0;
+
+       msixcnt = pci_msix_vec_count(pdev);
+       if (msixcnt < 0) {
+               dev_err(dev, "Not MSI-X interrupt capable.\n");
+               rc = msixcnt;
+               goto err_no_irq;
+       }
+
+       idxd->msix_entries = devm_kcalloc(dev, msixcnt,
+                       sizeof(struct msix_entry), GFP_KERNEL);
+       if (!idxd->msix_entries) {
+               rc = -ENOMEM;
+               goto err_no_irq;
+       }
+
+       for (i = 0; i < msixcnt; i++)
+               idxd->msix_entries[i].entry = i;
+
+       rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
+       if (rc) {
+               dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
+               goto err_no_irq;
+       }
+       dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
+
+       /*
+        * We implement one completion list per MSI-X entry, except for
+        * entry 0, which is reserved for errors and other miscellaneous
+        * events.
+        */
+       idxd->irq_entries = devm_kcalloc(dev, msixcnt,
+                                        sizeof(struct idxd_irq_entry),
+                                        GFP_KERNEL);
+       if (!idxd->irq_entries) {
+               rc = -ENOMEM;
+               goto err_no_irq;
+       }
+
+       for (i = 0; i < msixcnt; i++) {
+               idxd->irq_entries[i].id = i;
+               idxd->irq_entries[i].idxd = idxd;
+       }
+
+       msix = &idxd->msix_entries[0];
+       irq_entry = &idxd->irq_entries[0];
+       rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
+                                      idxd_misc_thread, 0, "idxd-misc",
+                                      irq_entry);
+       if (rc < 0) {
+               dev_err(dev, "Failed to allocate misc interrupt.\n");
+               goto err_no_irq;
+       }
+
+       dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
+               msix->vector);
+
+       /* first MSI-X entry is not for wq interrupts */
+       idxd->num_wq_irqs = msixcnt - 1;
+
+       for (i = 1; i < msixcnt; i++) {
+               msix = &idxd->msix_entries[i];
+               irq_entry = &idxd->irq_entries[i];
+
+               init_llist_head(&idxd->irq_entries[i].pending_llist);
+               INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
+               rc = devm_request_threaded_irq(dev, msix->vector,
+                                              idxd_irq_handler,
+                                              idxd_wq_thread, 0,
+                                              "idxd-portal", irq_entry);
+               if (rc < 0) {
+                       dev_err(dev, "Failed to allocate irq %d.\n",
+                               msix->vector);
+                       goto err_no_irq;
+               }
+               dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
+                       i, msix->vector);
+       }
+
+       idxd_unmask_error_interrupts(idxd);
+
+       return 0;
+
+ err_no_irq:
+       /* Disable error interrupt generation */
+       idxd_mask_error_interrupts(idxd);
+       pci_disable_msix(pdev);
+       dev_err(dev, "No usable interrupts\n");
+       return rc;
+}
+
+static void idxd_wqs_free_lock(struct idxd_device *idxd)
+{
+       int i;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               percpu_free_rwsem(&wq->submit_lock);
+       }
+}
+
+static int idxd_setup_internals(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int i;
+
+       idxd->groups = devm_kcalloc(dev, idxd->max_groups,
+                                   sizeof(struct idxd_group), GFP_KERNEL);
+       if (!idxd->groups)
+               return -ENOMEM;
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               idxd->groups[i].idxd = idxd;
+               idxd->groups[i].id = i;
+               idxd->groups[i].tc_a = -1;
+               idxd->groups[i].tc_b = -1;
+       }
+
+       idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
+                                GFP_KERNEL);
+       if (!idxd->wqs)
+               return -ENOMEM;
+
+       idxd->engines = devm_kcalloc(dev, idxd->max_engines,
+                                    sizeof(struct idxd_engine), GFP_KERNEL);
+       if (!idxd->engines)
+               return -ENOMEM;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+               int rc;
+
+               wq->id = i;
+               wq->idxd = idxd;
+               mutex_init(&wq->wq_lock);
+               atomic_set(&wq->dq_count, 0);
+               init_waitqueue_head(&wq->submit_waitq);
+               wq->idxd_cdev.minor = -1;
+               rc = percpu_init_rwsem(&wq->submit_lock);
+               if (rc < 0) {
+                       idxd_wqs_free_lock(idxd);
+                       return rc;
+               }
+       }
+
+       for (i = 0; i < idxd->max_engines; i++) {
+               idxd->engines[i].idxd = idxd;
+               idxd->engines[i].id = i;
+       }
+
+       return 0;
+}
+
+static void idxd_read_table_offsets(struct idxd_device *idxd)
+{
+       union offsets_reg offsets;
+       struct device *dev = &idxd->pdev->dev;
+
+       offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
+       offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET +
+                       sizeof(u64));
+       idxd->grpcfg_offset = offsets.grpcfg * 0x100;
+       dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
+       idxd->wqcfg_offset = offsets.wqcfg * 0x100;
+       dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
+               idxd->wqcfg_offset);
+       idxd->msix_perm_offset = offsets.msix_perm * 0x100;
+       dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
+               idxd->msix_perm_offset);
+       idxd->perfmon_offset = offsets.perfmon * 0x100;
+       dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
+}
+
+static void idxd_read_caps(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int i;
+
+       /* reading generic capabilities */
+       idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
+       dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
+       idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
+       dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
+       idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
+       dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
+       if (idxd->hw.gen_cap.config_en)
+               set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
+
+       /* reading group capabilities */
+       idxd->hw.group_cap.bits =
+               ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
+       dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
+       idxd->max_groups = idxd->hw.group_cap.num_groups;
+       dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
+       idxd->max_tokens = idxd->hw.group_cap.total_tokens;
+       dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
+       idxd->nr_tokens = idxd->max_tokens;
+
+       /* read engine capabilities */
+       idxd->hw.engine_cap.bits =
+               ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
+       dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
+       idxd->max_engines = idxd->hw.engine_cap.num_engines;
+       dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
+
+       /* read workqueue capabilities */
+       idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
+       dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
+       idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
+       dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
+       idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
+       dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
+
+       /* reading operation capabilities */
+       for (i = 0; i < 4; i++) {
+               idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
+                               IDXD_OPCAP_OFFSET + i * sizeof(u64));
+               dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
+       }
+}
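+
+/*
+ * Sketch of how a shadow register decodes (illustrative value only):
+ * the raw GENCAP qword is copied into the union and the bitfields do
+ * the slicing, e.g. max_xfer_shift = 31 means a 2GB transfer limit.
+ */
+static inline u64 example_decode_max_xfer(u64 raw_gencap)
+{
+       union gen_cap_reg cap;
+
+       cap.bits = raw_gencap;
+       return 1ULL << cap.max_xfer_shift;
+}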
+
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
+                                     void __iomem * const *iomap)
+{
+       struct device *dev = &pdev->dev;
+       struct idxd_device *idxd;
+
+       idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
+       if (!idxd)
+               return NULL;
+
+       idxd->pdev = pdev;
+       idxd->reg_base = iomap[IDXD_MMIO_BAR];
+       spin_lock_init(&idxd->dev_lock);
+
+       return idxd;
+}
+
+static int idxd_probe(struct idxd_device *idxd)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       struct device *dev = &pdev->dev;
+       int rc;
+
+       dev_dbg(dev, "%s entered and resetting device\n", __func__);
+       rc = idxd_device_reset(idxd);
+       if (rc < 0)
+               return rc;
+       dev_dbg(dev, "IDXD reset complete\n");
+
+       idxd_read_caps(idxd);
+       idxd_read_table_offsets(idxd);
+
+       rc = idxd_setup_internals(idxd);
+       if (rc)
+               goto err_setup;
+
+       rc = idxd_setup_interrupts(idxd);
+       if (rc)
+               goto err_setup;
+
+       dev_dbg(dev, "IDXD interrupt setup complete.\n");
+
+       mutex_lock(&idxd_idr_lock);
+       idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
+       mutex_unlock(&idxd_idr_lock);
+       if (idxd->id < 0) {
+               rc = idxd->id;
+               goto err_idr_fail;
+       }
+
+       idxd->major = idxd_cdev_get_major(idxd);
+
+       dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
+       return 0;
+
+ err_idr_fail:
+       idxd_mask_error_interrupts(idxd);
+       idxd_mask_msix_vectors(idxd);
+ err_setup:
+       return rc;
+}
+
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       void __iomem * const *iomap;
+       struct device *dev = &pdev->dev;
+       struct idxd_device *idxd;
+       int rc;
+       unsigned int mask;
+
+       rc = pcim_enable_device(pdev);
+       if (rc)
+               return rc;
+
+       dev_dbg(dev, "Mapping BARs\n");
+       mask = (1 << IDXD_MMIO_BAR);
+       rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
+       if (rc)
+               return rc;
+
+       iomap = pcim_iomap_table(pdev);
+       if (!iomap)
+               return -ENOMEM;
+
+       dev_dbg(dev, "Set DMA masks\n");
+       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (rc)
+               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+
+       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (rc)
+               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+
+       dev_dbg(dev, "Alloc IDXD context\n");
+       idxd = idxd_alloc(pdev, iomap);
+       if (!idxd)
+               return -ENOMEM;
+
+       idxd_set_type(idxd);
+
+       dev_dbg(dev, "Set PCI master\n");
+       pci_set_master(pdev);
+       pci_set_drvdata(pdev, idxd);
+
+       idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+       rc = idxd_probe(idxd);
+       if (rc) {
+               dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+               return -ENODEV;
+       }
+
+       rc = idxd_setup_sysfs(idxd);
+       if (rc) {
+               dev_err(dev, "IDXD sysfs setup failed\n");
+               return -ENODEV;
+       }
+
+       idxd->state = IDXD_DEV_CONF_READY;
+
+       dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
+                idxd->hw.version);
+
+       return 0;
+}
+
+static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
+{
+       struct idxd_desc *desc, *itr;
+       struct llist_node *head;
+
+       head = llist_del_all(&ie->pending_llist);
+       if (!head)
+               return;
+
+       llist_for_each_entry_safe(desc, itr, head, llnode) {
+               idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+               idxd_free_desc(desc->wq, desc);
+       }
+}
+
+static void idxd_flush_work_list(struct idxd_irq_entry *ie)
+{
+       struct idxd_desc *desc, *iter;
+
+       list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
+               list_del(&desc->list);
+               idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+               idxd_free_desc(desc->wq, desc);
+       }
+}
+
+static void idxd_shutdown(struct pci_dev *pdev)
+{
+       struct idxd_device *idxd = pci_get_drvdata(pdev);
+       int rc, i;
+       struct idxd_irq_entry *irq_entry;
+       int msixcnt = pci_msix_vec_count(pdev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       rc = idxd_device_disable(idxd);
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+       if (rc)
+               dev_err(&pdev->dev, "Disabling device failed\n");
+
+       dev_dbg(&pdev->dev, "%s called\n", __func__);
+       idxd_mask_msix_vectors(idxd);
+       idxd_mask_error_interrupts(idxd);
+
+       for (i = 0; i < msixcnt; i++) {
+               irq_entry = &idxd->irq_entries[i];
+               synchronize_irq(idxd->msix_entries[i].vector);
+               if (i == 0)
+                       continue;
+               idxd_flush_pending_llist(irq_entry);
+               idxd_flush_work_list(irq_entry);
+       }
+}
+
+static void idxd_remove(struct pci_dev *pdev)
+{
+       struct idxd_device *idxd = pci_get_drvdata(pdev);
+
+       dev_dbg(&pdev->dev, "%s called\n", __func__);
+       idxd_cleanup_sysfs(idxd);
+       idxd_shutdown(pdev);
+       idxd_wqs_free_lock(idxd);
+       mutex_lock(&idxd_idr_lock);
+       idr_remove(&idxd_idrs[idxd->type], idxd->id);
+       mutex_unlock(&idxd_idr_lock);
+}
+
+static struct pci_driver idxd_pci_driver = {
+       .name           = DRV_NAME,
+       .id_table       = idxd_pci_tbl,
+       .probe          = idxd_pci_probe,
+       .remove         = idxd_remove,
+       .shutdown       = idxd_shutdown,
+};
+
+static int __init idxd_init_module(void)
+{
+       int err, i;
+
+       /*
+        * If the CPU does not support MOVDIR64B (a 512-bit write), there
+        * is no point in enumerating the device; we cannot use it.
+        */
+       if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+               pr_warn("idxd driver cannot load without MOVDIR64B support.\n");
+               return -ENODEV;
+       }
+
+       pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
+               DRV_NAME, IDXD_DRIVER_VERSION);
+
+       mutex_init(&idxd_idr_lock);
+       for (i = 0; i < IDXD_TYPE_MAX; i++)
+               idr_init(&idxd_idrs[i]);
+
+       err = idxd_register_bus_type();
+       if (err < 0)
+               return err;
+
+       err = idxd_register_driver();
+       if (err < 0)
+               goto err_idxd_driver_register;
+
+       err = idxd_cdev_register();
+       if (err)
+               goto err_cdev_register;
+
+       err = pci_register_driver(&idxd_pci_driver);
+       if (err)
+               goto err_pci_register;
+
+       return 0;
+
+err_pci_register:
+       idxd_cdev_remove();
+err_cdev_register:
+       idxd_unregister_driver();
+err_idxd_driver_register:
+       idxd_unregister_bus_type();
+       return err;
+}
+module_init(idxd_init_module);
+
+static void __exit idxd_exit_module(void)
+{
+       pci_unregister_driver(&idxd_pci_driver);
+       idxd_cdev_remove();
+       idxd_unregister_bus_type();
+}
+module_exit(idxd_exit_module);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
new file mode 100644 (file)
index 0000000..d6fcd2e
--- /dev/null
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+       int i;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               wq->state = IDXD_WQ_DISABLED;
+       }
+}
+
+static int idxd_restart(struct idxd_device *idxd)
+{
+       int i, rc;
+
+       lockdep_assert_held(&idxd->dev_lock);
+
+       rc = __idxd_device_reset(idxd);
+       if (rc < 0)
+               goto out;
+
+       rc = idxd_device_config(idxd);
+       if (rc < 0)
+               goto out;
+
+       rc = idxd_device_enable(idxd);
+       if (rc < 0)
+               goto out;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               if (wq->state == IDXD_WQ_ENABLED) {
+                       rc = idxd_wq_enable(wq);
+                       if (rc < 0) {
+                               dev_warn(&idxd->pdev->dev,
+                                        "Unable to re-enable wq %s\n",
+                                        dev_name(&wq->conf_dev));
+                       }
+               }
+       }
+
+       return 0;
+
+ out:
+       idxd_device_wqs_clear_state(idxd);
+       idxd->state = IDXD_DEV_HALTED;
+       return rc;
+}
+
+irqreturn_t idxd_irq_handler(int vec, void *data)
+{
+       struct idxd_irq_entry *irq_entry = data;
+       struct idxd_device *idxd = irq_entry->idxd;
+
+       idxd_mask_msix_vector(idxd, irq_entry->id);
+       return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+       struct idxd_irq_entry *irq_entry = data;
+       struct idxd_device *idxd = irq_entry->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       union gensts_reg gensts;
+       u32 cause, val = 0;
+       int i, rc;
+       bool err = false;
+
+       cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+       if (cause & IDXD_INTC_ERR) {
+               spin_lock_bh(&idxd->dev_lock);
+               for (i = 0; i < 4; i++)
+                       idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
+                                       IDXD_SWERR_OFFSET + i * sizeof(u64));
+               iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+               if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+                       int id = idxd->sw_err.wq_idx;
+                       struct idxd_wq *wq = &idxd->wqs[id];
+
+                       if (wq->type == IDXD_WQT_USER)
+                               wake_up_interruptible(&wq->idxd_cdev.err_queue);
+               } else {
+                       for (i = 0; i < idxd->max_wqs; i++) {
+                               struct idxd_wq *wq = &idxd->wqs[i];
+
+                               if (wq->type == IDXD_WQT_USER)
+                                       wake_up_interruptible(&wq->idxd_cdev.err_queue);
+                       }
+               }
+
+               spin_unlock_bh(&idxd->dev_lock);
+               val |= IDXD_INTC_ERR;
+
+               for (i = 0; i < 4; i++)
+                       dev_warn(dev, "err[%d]: %#16.16llx\n",
+                                i, idxd->sw_err.bits[i]);
+               err = true;
+       }
+
+       if (cause & IDXD_INTC_CMD) {
+               /* Driver does use command interrupts */
+               val |= IDXD_INTC_CMD;
+       }
+
+       if (cause & IDXD_INTC_OCCUPY) {
+               /* Driver does not utilize occupancy interrupt */
+               val |= IDXD_INTC_OCCUPY;
+       }
+
+       if (cause & IDXD_INTC_PERFMON_OVFL) {
+               /*
+                * Driver does not utilize perfmon counter overflow interrupt
+                * yet.
+                */
+               val |= IDXD_INTC_PERFMON_OVFL;
+       }
+
+       val ^= cause;
+       if (val)
+               dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
+                             val);
+
+       iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+       if (!err)
+               return IRQ_HANDLED;
+
+       gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+       if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+               spin_lock_bh(&idxd->dev_lock);
+               if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+                       rc = idxd_restart(idxd);
+                       if (rc < 0)
+                               dev_err(&idxd->pdev->dev,
+                                       "idxd restart failed, device halt.");
+               } else {
+                       idxd_device_wqs_clear_state(idxd);
+                       idxd->state = IDXD_DEV_HALTED;
+                       dev_err(&idxd->pdev->dev,
+                               "idxd halted, need %s.\n",
+                               gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
+                               "FLR" : "system reset");
+               }
+               spin_unlock_bh(&idxd->dev_lock);
+       }
+
+       idxd_unmask_msix_vector(idxd, irq_entry->id);
+       return IRQ_HANDLED;
+}
+
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+                                    int *processed)
+{
+       struct idxd_desc *desc, *t;
+       struct llist_node *head;
+       int queued = 0;
+
+       head = llist_del_all(&irq_entry->pending_llist);
+       if (!head)
+               return 0;
+
+       llist_for_each_entry_safe(desc, t, head, llnode) {
+               if (desc->completion->status) {
+                       idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+                       idxd_free_desc(desc->wq, desc);
+                       (*processed)++;
+               } else {
+                       list_add_tail(&desc->list, &irq_entry->work_list);
+                       queued++;
+               }
+       }
+
+       return queued;
+}
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+                                int *processed)
+{
+       struct list_head *node, *next;
+       int queued = 0;
+
+       if (list_empty(&irq_entry->work_list))
+               return 0;
+
+       list_for_each_safe(node, next, &irq_entry->work_list) {
+               struct idxd_desc *desc =
+                       container_of(node, struct idxd_desc, list);
+
+               if (desc->completion->status) {
+                       list_del(&desc->list);
+                       /* process and callback */
+                       idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+                       idxd_free_desc(desc->wq, desc);
+                       (*processed)++;
+               } else {
+                       queued++;
+               }
+       }
+
+       return queued;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+       struct idxd_irq_entry *irq_entry = data;
+       int rc, processed = 0, retry = 0;
+
+       /*
+        * There are two lists we are processing. The pending_llist is where
+        * the submitter adds each descriptor after sending it to the device
+        * work queue. It's a lockless singly linked list. The work_list is
+        * an ordinary Linux doubly linked list. We are in a scenario of
+        * multiple producers and a single consumer: the producers are all
+        * the kernel submitters of descriptors, and the consumer is the
+        * kernel irq handler thread for the MSI-X vector when using threaded
+        * irq. To work within the restrictions of llist and remain lockless,
+        * we take the following steps:
+        * 1. Iterate through the work_list and process any completed
+        *    descriptors. Delete the completed entries during iteration.
+        * 2. llist_del_all() from the pending list.
+        * 3. Iterate through the llist that was deleted from the pending list
+        *    and process the completed entries.
+        * 4. If an entry is still waiting on hardware, list_add_tail() it to
+        *    the work_list.
+        * 5. Repeat until no more descriptors remain.
+        */
+       do {
+               rc = irq_process_work_list(irq_entry, &processed);
+               if (rc != 0) {
+                       retry++;
+                       continue;
+               }
+
+               rc = irq_process_pending_llist(irq_entry, &processed);
+       } while (rc != 0 && retry != 10);
+
+       idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
+
+       if (processed == 0)
+               return IRQ_NONE;
+
+       return IRQ_HANDLED;
+}
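+
+/*
+ * Condensed sketch of the two-list drain described above (illustration
+ * only; the real loop in idxd_wq_thread() also retries and counts):
+ * old work first, then whatever producers queued in the meantime.
+ */
+static inline void example_drain_once(struct idxd_irq_entry *ie,
+                                     int *processed)
+{
+       irq_process_work_list(ie, processed);     /* step 1 */
+       irq_process_pending_llist(ie, processed); /* steps 2-4 */
+}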
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
new file mode 100644 (file)
index 0000000..a39e7ae
--- /dev/null
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_REGISTERS_H_
+#define _IDXD_REGISTERS_H_
+
+/* PCI Config */
+#define PCI_DEVICE_ID_INTEL_DSA_SPR0   0x0b25
+
+#define IDXD_MMIO_BAR          0
+#define IDXD_WQ_BAR            2
+#define IDXD_PORTAL_SIZE       0x4000
+
+/* MMIO Device BAR0 Registers */
+#define IDXD_VER_OFFSET                        0x00
+#define IDXD_VER_MAJOR_MASK            0xf0
+#define IDXD_VER_MINOR_MASK            0x0f
+#define GET_IDXD_VER_MAJOR(x)          (((x) & IDXD_VER_MAJOR_MASK) >> 4)
+#define GET_IDXD_VER_MINOR(x)          ((x) & IDXD_VER_MINOR_MASK)
+
+union gen_cap_reg {
+       struct {
+               u64 block_on_fault:1;
+               u64 overlap_copy:1;
+               u64 cache_control_mem:1;
+               u64 cache_control_cache:1;
+               u64 rsvd:3;
+               u64 int_handle_req:1;
+               u64 dest_readback:1;
+               u64 drain_readback:1;
+               u64 rsvd2:6;
+               u64 max_xfer_shift:5;
+               u64 max_batch_shift:4;
+               u64 max_ims_mult:6;
+               u64 config_en:1;
+               u64 max_descs_per_engine:8;
+               u64 rsvd3:24;
+       };
+       u64 bits;
+} __packed;
+#define IDXD_GENCAP_OFFSET             0x10
+
+union wq_cap_reg {
+       struct {
+               u64 total_wq_size:16;
+               u64 num_wqs:8;
+               u64 rsvd:24;
+               u64 shared_mode:1;
+               u64 dedicated_mode:1;
+               u64 rsvd2:1;
+               u64 priority:1;
+               u64 occupancy:1;
+               u64 occupancy_int:1;
+               u64 rsvd3:10;
+       };
+       u64 bits;
+} __packed;
+#define IDXD_WQCAP_OFFSET              0x20
+
+union group_cap_reg {
+       struct {
+               u64 num_groups:8;
+               u64 total_tokens:8;
+               u64 token_en:1;
+               u64 token_limit:1;
+               u64 rsvd:46;
+       };
+       u64 bits;
+} __packed;
+#define IDXD_GRPCAP_OFFSET             0x30
+
+union engine_cap_reg {
+       struct {
+               u64 num_engines:8;
+               u64 rsvd:56;
+       };
+       u64 bits;
+} __packed;
+
+#define IDXD_ENGCAP_OFFSET             0x38
+
+#define IDXD_OPCAP_NOOP                        0x0001
+#define IDXD_OPCAP_BATCH                       0x0002
+#define IDXD_OPCAP_MEMMOVE                     0x0008
+struct opcap {
+       u64 bits[4];
+};
+
+#define IDXD_OPCAP_OFFSET              0x40
+
+#define IDXD_TABLE_OFFSET              0x60
+union offsets_reg {
+       struct {
+               u64 grpcfg:16;
+               u64 wqcfg:16;
+               u64 msix_perm:16;
+               u64 ims:16;
+               u64 perfmon:16;
+               u64 rsvd:48;
+       };
+       u64 bits[2];
+} __packed;
+
+#define IDXD_GENCFG_OFFSET             0x80
+union gencfg_reg {
+       struct {
+               u32 token_limit:8;
+               u32 rsvd:4;
+               u32 user_int_en:1;
+               u32 rsvd2:19;
+       };
+       u32 bits;
+} __packed;
+
+#define IDXD_GENCTRL_OFFSET            0x88
+union genctrl_reg {
+       struct {
+               u32 softerr_int_en:1;
+               u32 rsvd:31;
+       };
+       u32 bits;
+} __packed;
+
+#define IDXD_GENSTATS_OFFSET           0x90
+union gensts_reg {
+       struct {
+               u32 state:2;
+               u32 reset_type:2;
+               u32 rsvd:28;
+       };
+       u32 bits;
+} __packed;
+
+enum idxd_device_status_state {
+       IDXD_DEVICE_STATE_DISABLED = 0,
+       IDXD_DEVICE_STATE_ENABLED,
+       IDXD_DEVICE_STATE_DRAIN,
+       IDXD_DEVICE_STATE_HALT,
+};
+
+enum idxd_device_reset_type {
+       IDXD_DEVICE_RESET_SOFTWARE = 0,
+       IDXD_DEVICE_RESET_FLR,
+       IDXD_DEVICE_RESET_WARM,
+       IDXD_DEVICE_RESET_COLD,
+};
+
+#define IDXD_INTCAUSE_OFFSET           0x98
+#define IDXD_INTC_ERR                  0x01
+#define IDXD_INTC_CMD                  0x02
+#define IDXD_INTC_OCCUPY               0x04
+#define IDXD_INTC_PERFMON_OVFL         0x08
+
+#define IDXD_CMD_OFFSET                        0xa0
+union idxd_command_reg {
+       struct {
+               u32 operand:20;
+               u32 cmd:5;
+               u32 rsvd:6;
+               u32 int_req:1;
+       };
+       u32 bits;
+} __packed;
+
+enum idxd_cmd {
+       IDXD_CMD_ENABLE_DEVICE = 1,
+       IDXD_CMD_DISABLE_DEVICE,
+       IDXD_CMD_DRAIN_ALL,
+       IDXD_CMD_ABORT_ALL,
+       IDXD_CMD_RESET_DEVICE,
+       IDXD_CMD_ENABLE_WQ,
+       IDXD_CMD_DISABLE_WQ,
+       IDXD_CMD_DRAIN_WQ,
+       IDXD_CMD_ABORT_WQ,
+       IDXD_CMD_RESET_WQ,
+       IDXD_CMD_DRAIN_PASID,
+       IDXD_CMD_ABORT_PASID,
+       IDXD_CMD_REQUEST_INT_HANDLE,
+};
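+
+/*
+ * Illustrative encoding (an assumption, not code from this patch): a
+ * device command is a single 32-bit write, e.g. enabling WQ 3 would
+ * encode as below and be written to IDXD_CMD_OFFSET.
+ */
+static inline u32 example_enable_wq_cmd(void)
+{
+       union idxd_command_reg cmd = { .bits = 0 };
+
+       cmd.cmd = IDXD_CMD_ENABLE_WQ;
+       cmd.operand = 3;        /* WQ index */
+       return cmd.bits;
+}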
+
+#define IDXD_CMDSTS_OFFSET             0xa8
+union cmdsts_reg {
+       struct {
+               u8 err;
+               u16 result;
+               u8 rsvd:7;
+               u8 active:1;
+       };
+       u32 bits;
+} __packed;
+#define IDXD_CMDSTS_ACTIVE             0x80000000
+
+enum idxd_cmdsts_err {
+       IDXD_CMDSTS_SUCCESS = 0,
+       IDXD_CMDSTS_INVAL_CMD,
+       IDXD_CMDSTS_INVAL_WQIDX,
+       IDXD_CMDSTS_HW_ERR,
+       /* enable device errors */
+       IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
+       IDXD_CMDSTS_ERR_CONFIG,
+       IDXD_CMDSTS_ERR_BUSMASTER_EN,
+       IDXD_CMDSTS_ERR_PASID_INVAL,
+       IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
+       IDXD_CMDSTS_ERR_GRP_CONFIG,
+       IDXD_CMDSTS_ERR_GRP_CONFIG2,
+       IDXD_CMDSTS_ERR_GRP_CONFIG3,
+       IDXD_CMDSTS_ERR_GRP_CONFIG4,
+       /* enable wq errors */
+       IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
+       IDXD_CMDSTS_ERR_WQ_ENABLED,
+       IDXD_CMDSTS_ERR_WQ_SIZE,
+       IDXD_CMDSTS_ERR_WQ_PRIOR,
+       IDXD_CMDSTS_ERR_WQ_MODE,
+       IDXD_CMDSTS_ERR_BOF_EN,
+       IDXD_CMDSTS_ERR_PASID_EN,
+       IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
+       IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
+       /* disable device errors */
+       IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
+       /* disable WQ, drain WQ, abort WQ, reset WQ */
+       IDXD_CMDSTS_ERR_DEV_NOT_EN,
+       /* request interrupt handle */
+       IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
+       IDXD_CMDSTS_ERR_NO_HANDLE,
+};
+
+#define IDXD_SWERR_OFFSET              0xc0
+#define IDXD_SWERR_VALID               0x00000001
+#define IDXD_SWERR_OVERFLOW            0x00000002
+#define IDXD_SWERR_ACK                 (IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
+union sw_err_reg {
+       struct {
+               u64 valid:1;
+               u64 overflow:1;
+               u64 desc_valid:1;
+               u64 wq_idx_valid:1;
+               u64 batch:1;
+               u64 fault_rw:1;
+               u64 priv:1;
+               u64 rsvd:1;
+               u64 error:8;
+               u64 wq_idx:8;
+               u64 rsvd2:8;
+               u64 operation:8;
+               u64 pasid:20;
+               u64 rsvd3:4;
+
+               u64 batch_idx:16;
+               u64 rsvd4:16;
+               u64 invalid_flags:32;
+
+               u64 fault_addr;
+
+               u64 rsvd5;
+       };
+       u64 bits[4];
+} __packed;
+
+union msix_perm {
+       struct {
+               u32 rsvd:2;
+               u32 ignore:1;
+               u32 pasid_en:1;
+               u32 rsvd2:8;
+               u32 pasid:20;
+       };
+       u32 bits;
+} __packed;
+
+union group_flags {
+       struct {
+               u32 tc_a:3;
+               u32 tc_b:3;
+               u32 rsvd:1;
+               u32 use_token_limit:1;
+               u32 tokens_reserved:8;
+               u32 rsvd2:4;
+               u32 tokens_allowed:8;
+               u32 rsvd3:4;
+       };
+       u32 bits;
+} __packed;
+
+struct grpcfg {
+       u64 wqs[4];
+       u64 engines;
+       union group_flags flags;
+} __packed;
+
+union wqcfg {
+       struct {
+               /* bytes 0-3 */
+               u16 wq_size;
+               u16 rsvd;
+
+               /* bytes 4-7 */
+               u16 wq_thresh;
+               u16 rsvd1;
+
+               /* bytes 8-11 */
+               u32 mode:1;     /* shared or dedicated */
+               u32 bof:1;      /* block on fault */
+               u32 rsvd2:2;
+               u32 priority:4;
+               u32 pasid:20;
+               u32 pasid_en:1;
+               u32 priv:1;
+               u32 rsvd3:2;
+
+               /* bytes 12-15 */
+               u32 max_xfer_shift:5;
+               u32 max_batch_shift:4;
+               u32 rsvd4:23;
+
+               /* bytes 16-19 */
+               u16 occupancy_inth;
+               u16 occupancy_table_sel:1;
+               u16 rsvd5:15;
+
+               /* bytes 20-23 */
+               u16 occupancy_limit;
+               u16 occupancy_int_en:1;
+               u16 rsvd6:15;
+
+               /* bytes 24-27 */
+               u16 occupancy;
+               u16 occupancy_int:1;
+               u16 rsvd7:12;
+               u16 mode_support:1;
+               u16 wq_state:2;
+
+               /* bytes 28-31 */
+               u32 rsvd8;
+       };
+       u32 bits[8];
+} __packed;
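+
+/*
+ * Worked offset math (illustration only): one WQCFG slot is one union
+ * wqcfg, 8 x u32 = 32 bytes, so register i of WQ n lives at
+ * wqcfg_offset + n * 32 + i * 4, matching idxd_wq_config_write().
+ */
+static inline u32 example_wqcfg_reg_offset(u32 wqcfg_base, int wq_id, int i)
+{
+       return wqcfg_base + wq_id * sizeof(union wqcfg) + i * sizeof(u32);
+}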
+#endif
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
new file mode 100644 (file)
index 0000000..45a0c58
--- /dev/null
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <uapi/linux/idxd.h>
+#include "idxd.h"
+#include "registers.h"
+
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
+{
+       struct idxd_desc *desc;
+       int idx;
+       struct idxd_device *idxd = wq->idxd;
+
+       if (idxd->state != IDXD_DEV_ENABLED)
+               return ERR_PTR(-EIO);
+
+       if (optype == IDXD_OP_BLOCK)
+               percpu_down_read(&wq->submit_lock);
+       else if (!percpu_down_read_trylock(&wq->submit_lock))
+               return ERR_PTR(-EBUSY);
+
+       if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) {
+               int rc;
+
+               if (optype == IDXD_OP_NONBLOCK) {
+                       percpu_up_read(&wq->submit_lock);
+                       return ERR_PTR(-EAGAIN);
+               }
+
+               percpu_up_read(&wq->submit_lock);
+               percpu_down_write(&wq->submit_lock);
+               rc = wait_event_interruptible(wq->submit_waitq,
+                                             atomic_add_unless(&wq->dq_count,
+                                                               1, wq->size) ||
+                                              idxd->state != IDXD_DEV_ENABLED);
+               percpu_up_write(&wq->submit_lock);
+               if (rc < 0)
+                       return ERR_PTR(-EINTR);
+               if (idxd->state != IDXD_DEV_ENABLED)
+                       return ERR_PTR(-EIO);
+       } else {
+               percpu_up_read(&wq->submit_lock);
+       }
+
+       idx = sbitmap_get(&wq->sbmap, 0, false);
+       if (idx < 0) {
+               atomic_dec(&wq->dq_count);
+               return ERR_PTR(-EAGAIN);
+       }
+
+       desc = wq->descs[idx];
+       memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
+       memset(desc->completion, 0, sizeof(struct dsa_completion_record));
+       return desc;
+}
+
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+       atomic_dec(&wq->dq_count);
+
+       sbitmap_clear_bit(&wq->sbmap, desc->id);
+       wake_up(&wq->submit_waitq);
+}
+
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+       struct idxd_device *idxd = wq->idxd;
+       int vec = desc->hw->int_handle;
+       void __iomem *portal;
+
+       if (idxd->state != IDXD_DEV_ENABLED)
+               return -EIO;
+
+       portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
+       /*
+        * The wmb() flushes writes to coherent DMA data before possibly
+        * triggering a DMA read. The wmb() is necessary even on UP because
+        * the recipient is a device.
+        */
+       wmb();
+       iosubmit_cmds512(portal, desc->hw, 1);
+
+       /*
+        * Add the descriptor to the lockless pending list of the irq_entry
+        * that this descriptor was assigned to.
+        */
+       if (desc->hw->flags & IDXD_OP_FLAG_RCI)
+               llist_add(&desc->llnode,
+                         &idxd->irq_entries[vec].pending_llist);
+
+       return 0;
+}
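+
+/*
+ * Sketch of a kernel submission built from the helpers above (assumed
+ * pre-mapped DMA addresses; illustration only). Without the RCI flag
+ * no completion interrupt is requested, so a caller would poll
+ * desc->completion->status.
+ */
+static inline int example_submit_memmove(struct idxd_wq *wq, u64 src,
+                                        u64 dst, u32 len)
+{
+       struct idxd_desc *desc;
+
+       desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+       if (IS_ERR(desc))
+               return PTR_ERR(desc);
+
+       desc->hw->opcode = DSA_OPCODE_MEMMOVE;
+       desc->hw->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
+       desc->hw->src_addr = src;
+       desc->hw->dst_addr = dst;
+       desc->hw->xfer_size = len;
+       desc->hw->completion_addr = desc->compl_dma;
+
+       return idxd_submit_desc(wq, desc);
+}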
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
new file mode 100644 (file)
index 0000000..849c50a
--- /dev/null
@@ -0,0 +1,1528 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+static char *idxd_wq_type_names[] = {
+       [IDXD_WQT_NONE]         = "none",
+       [IDXD_WQT_KERNEL]       = "kernel",
+       [IDXD_WQT_USER]         = "user",
+};
+
+static void idxd_conf_device_release(struct device *dev)
+{
+       dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
+}
+
+static struct device_type idxd_group_device_type = {
+       .name = "group",
+       .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_wq_device_type = {
+       .name = "wq",
+       .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_engine_device_type = {
+       .name = "engine",
+       .release = idxd_conf_device_release,
+};
+
+static struct device_type dsa_device_type = {
+       .name = "dsa",
+       .release = idxd_conf_device_release,
+};
+
+static inline bool is_dsa_dev(struct device *dev)
+{
+       return dev ? dev->type == &dsa_device_type : false;
+}
+
+static inline bool is_idxd_dev(struct device *dev)
+{
+       return is_dsa_dev(dev);
+}
+
+static inline bool is_idxd_wq_dev(struct device *dev)
+{
+       return dev ? dev->type == &idxd_wq_device_type : false;
+}
+
+static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+{
+       return wq->type == IDXD_WQT_KERNEL &&
+              strcmp(wq->name, "dmaengine") == 0;
+}
+
+static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+{
+       return wq->type == IDXD_WQT_USER;
+}
+
+static int idxd_config_bus_match(struct device *dev,
+                                struct device_driver *drv)
+{
+       int matched = 0;
+
+       if (is_idxd_dev(dev)) {
+               struct idxd_device *idxd = confdev_to_idxd(dev);
+
+               if (idxd->state != IDXD_DEV_CONF_READY)
+                       return 0;
+               matched = 1;
+       } else if (is_idxd_wq_dev(dev)) {
+               struct idxd_wq *wq = confdev_to_wq(dev);
+               struct idxd_device *idxd = wq->idxd;
+
+               if (idxd->state < IDXD_DEV_CONF_READY)
+                       return 0;
+
+               if (wq->state != IDXD_WQ_DISABLED) {
+                       dev_dbg(dev, "%s not disabled\n", dev_name(dev));
+                       return 0;
+               }
+               matched = 1;
+       }
+
+       if (matched)
+               dev_dbg(dev, "%s matched\n", dev_name(dev));
+
+       return matched;
+}
+
+static int idxd_config_bus_probe(struct device *dev)
+{
+       int rc;
+       unsigned long flags;
+
+       dev_dbg(dev, "%s called\n", __func__);
+
+       if (is_idxd_dev(dev)) {
+               struct idxd_device *idxd = confdev_to_idxd(dev);
+
+               if (idxd->state != IDXD_DEV_CONF_READY) {
+                       dev_warn(dev, "Device not ready for config\n");
+                       return -EBUSY;
+               }
+
+               if (!try_module_get(THIS_MODULE))
+                       return -ENXIO;
+
+               spin_lock_irqsave(&idxd->dev_lock, flags);
+
+               /* Perform IDXD configuration and enabling */
+               rc = idxd_device_config(idxd);
+               if (rc < 0) {
+                       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+                       dev_warn(dev, "Device config failed: %d\n", rc);
+                       return rc;
+               }
+
+               /* start device */
+               rc = idxd_device_enable(idxd);
+               if (rc < 0) {
+                       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+                       dev_warn(dev, "Device enable failed: %d\n", rc);
+                       return rc;
+               }
+
+               spin_unlock_irqrestore(&idxd->dev_lock, flags);
+               dev_info(dev, "Device %s enabled\n", dev_name(dev));
+
+               rc = idxd_register_dma_device(idxd);
+               if (rc < 0) {
+                       dev_dbg(dev, "Failed to register dmaengine device\n");
+                       return rc;
+               }
+               return 0;
+       } else if (is_idxd_wq_dev(dev)) {
+               struct idxd_wq *wq = confdev_to_wq(dev);
+               struct idxd_device *idxd = wq->idxd;
+
+               mutex_lock(&wq->wq_lock);
+
+               if (idxd->state != IDXD_DEV_ENABLED) {
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "Enabling while device not enabled.\n");
+                       return -EPERM;
+               }
+
+               if (wq->state != IDXD_WQ_DISABLED) {
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "WQ %d already enabled.\n", wq->id);
+                       return -EBUSY;
+               }
+
+               if (!wq->group) {
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "WQ not attached to group.\n");
+                       return -EINVAL;
+               }
+
+               if (strlen(wq->name) == 0) {
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "WQ name not set.\n");
+                       return -EINVAL;
+               }
+
+               rc = idxd_wq_alloc_resources(wq);
+               if (rc < 0) {
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "WQ resource alloc failed\n");
+                       return rc;
+               }
+
+               spin_lock_irqsave(&idxd->dev_lock, flags);
+               rc = idxd_device_config(idxd);
+               if (rc < 0) {
+                       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "Writing WQ %d config failed: %d\n",
+                                wq->id, rc);
+                       return rc;
+               }
+
+               rc = idxd_wq_enable(wq);
+               if (rc < 0) {
+                       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "WQ %d enabling failed: %d\n",
+                                wq->id, rc);
+                       return rc;
+               }
+               spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+               rc = idxd_wq_map_portal(wq);
+               if (rc < 0) {
+                       int disable_rc;
+
+                       dev_warn(dev, "wq portal mapping failed: %d\n", rc);
+                       disable_rc = idxd_wq_disable(wq);
+                       if (disable_rc < 0)
+                               dev_warn(dev, "IDXD wq disable failed\n");
+                       /* dev_lock was already released above */
+                       mutex_unlock(&wq->wq_lock);
+                       return rc;
+               }
+
+               wq->client_count = 0;
+
+               dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
+
+               if (is_idxd_wq_dmaengine(wq)) {
+                       rc = idxd_register_dma_channel(wq);
+                       if (rc < 0) {
+                               dev_dbg(dev, "DMA channel register failed\n");
+                               mutex_unlock(&wq->wq_lock);
+                               return rc;
+                       }
+               } else if (is_idxd_wq_cdev(wq)) {
+                       rc = idxd_wq_add_cdev(wq);
+                       if (rc < 0) {
+                               dev_dbg(dev, "Cdev creation failed\n");
+                               mutex_unlock(&wq->wq_lock);
+                               return rc;
+                       }
+               }
+
+               mutex_unlock(&wq->wq_lock);
+               return 0;
+       }
+
+       return -ENODEV;
+}
+
+static void disable_wq(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       unsigned long flags;
+       int rc;
+
+       mutex_lock(&wq->wq_lock);
+       dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+       if (wq->state == IDXD_WQ_DISABLED) {
+               mutex_unlock(&wq->wq_lock);
+               return;
+       }
+
+       if (is_idxd_wq_dmaengine(wq))
+               idxd_unregister_dma_channel(wq);
+       else if (is_idxd_wq_cdev(wq))
+               idxd_wq_del_cdev(wq);
+
+       if (idxd_wq_refcount(wq))
+               dev_warn(dev, "Clients has claim on wq %d: %d\n",
+                        wq->id, idxd_wq_refcount(wq));
+
+       idxd_wq_unmap_portal(wq);
+
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       rc = idxd_wq_disable(wq);
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+       idxd_wq_free_resources(wq);
+       wq->client_count = 0;
+       mutex_unlock(&wq->wq_lock);
+
+       if (rc < 0)
+               dev_warn(dev, "Failed to disable %s: %d\n",
+                        dev_name(&wq->conf_dev), rc);
+       else
+               dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+}
+
+static int idxd_config_bus_remove(struct device *dev)
+{
+       int rc;
+       unsigned long flags;
+
+       dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
+
+       /* disable workqueue here */
+       if (is_idxd_wq_dev(dev)) {
+               struct idxd_wq *wq = confdev_to_wq(dev);
+
+               disable_wq(wq);
+       } else if (is_idxd_dev(dev)) {
+               struct idxd_device *idxd = confdev_to_idxd(dev);
+               int i;
+
+               dev_dbg(dev, "%s removing dev %s\n", __func__,
+                       dev_name(&idxd->conf_dev));
+               for (i = 0; i < idxd->max_wqs; i++) {
+                       struct idxd_wq *wq = &idxd->wqs[i];
+
+                       if (wq->state == IDXD_WQ_DISABLED)
+                               continue;
+                       dev_warn(dev, "Active wq %d on disable %s.\n", i,
+                                dev_name(&idxd->conf_dev));
+                       device_release_driver(&wq->conf_dev);
+               }
+
+               idxd_unregister_dma_device(idxd);
+               spin_lock_irqsave(&idxd->dev_lock, flags);
+               rc = idxd_device_disable(idxd);
+               spin_unlock_irqrestore(&idxd->dev_lock, flags);
+               module_put(THIS_MODULE);
+               if (rc < 0)
+                       dev_warn(dev, "Device disable failed\n");
+               else
+                       dev_info(dev, "Device %s disabled\n", dev_name(dev));
+
+       }
+
+       return 0;
+}
+
+static void idxd_config_bus_shutdown(struct device *dev)
+{
+       dev_dbg(dev, "%s called\n", __func__);
+}
+
+struct bus_type dsa_bus_type = {
+       .name = "dsa",
+       .match = idxd_config_bus_match,
+       .probe = idxd_config_bus_probe,
+       .remove = idxd_config_bus_remove,
+       .shutdown = idxd_config_bus_shutdown,
+};
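+
+/*
+ * With this bus type registered, enabling and disabling is driven from
+ * user space by binding devices to the "dsa" driver defined below.  A
+ * plausible flow (sysfs paths inferred from the bus and device names in
+ * this file; shown only as a sketch):
+ *
+ *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind      # probe -> enable device
+ *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind     # enable a wq
+ *	echo dsa0 > /sys/bus/dsa/drivers/dsa/unbind    # remove -> disable
+ */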
+
+static struct bus_type *idxd_bus_types[] = {
+       &dsa_bus_type
+};
+
+static struct idxd_device_driver dsa_drv = {
+       .drv = {
+               .name = "dsa",
+               .bus = &dsa_bus_type,
+               .owner = THIS_MODULE,
+               .mod_name = KBUILD_MODNAME,
+       },
+};
+
+static struct idxd_device_driver *idxd_drvs[] = {
+       &dsa_drv
+};
+
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
+{
+       return idxd_bus_types[idxd->type];
+}
+
+static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
+{
+       if (idxd->type == IDXD_TYPE_DSA)
+               return &dsa_device_type;
+       else
+               return NULL;
+}
+
+/* IDXD generic driver setup */
+int idxd_register_driver(void)
+{
+       int i, rc;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++) {
+               rc = driver_register(&idxd_drvs[i]->drv);
+               if (rc < 0)
+                       goto drv_fail;
+       }
+
+       return 0;
+
+drv_fail:
+       while (--i >= 0)
+               driver_unregister(&idxd_drvs[i]->drv);
+       return rc;
+}
+
+void idxd_unregister_driver(void)
+{
+       int i;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++)
+               driver_unregister(&idxd_drvs[i]->drv);
+}
+
+/* IDXD engine attributes */
+static ssize_t engine_group_id_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct idxd_engine *engine =
+               container_of(dev, struct idxd_engine, conf_dev);
+
+       if (engine->group)
+               return sprintf(buf, "%d\n", engine->group->id);
+       else
+               return sprintf(buf, "%d\n", -1);
+}
+
+static ssize_t engine_group_id_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       struct idxd_engine *engine =
+               container_of(dev, struct idxd_engine, conf_dev);
+       struct idxd_device *idxd = engine->idxd;
+       long id;
+       int rc;
+       struct idxd_group *prevg, *group;
+
+       rc = kstrtol(buf, 10, &id);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (id > idxd->max_groups - 1 || id < -1)
+               return -EINVAL;
+
+       if (id == -1) {
+               if (engine->group) {
+                       engine->group->num_engines--;
+                       engine->group = NULL;
+               }
+               return count;
+       }
+
+       group = &idxd->groups[id];
+       prevg = engine->group;
+
+       if (prevg)
+               prevg->num_engines--;
+       engine->group = group;
+       engine->group->num_engines++;
+
+       return count;
+}
+
+static struct device_attribute dev_attr_engine_group =
+               __ATTR(group_id, 0644, engine_group_id_show,
+                      engine_group_id_store);
+
+static struct attribute *idxd_engine_attributes[] = {
+       &dev_attr_engine_group.attr,
+       NULL,
+};
+
+static const struct attribute_group idxd_engine_attribute_group = {
+       .attrs = idxd_engine_attributes,
+};
+
+static const struct attribute_group *idxd_engine_attribute_groups[] = {
+       &idxd_engine_attribute_group,
+       NULL,
+};
+
+/* Group attributes */
+
+static void idxd_set_free_tokens(struct idxd_device *idxd)
+{
+       int i, tokens;
+
+       for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *g = &idxd->groups[i];
+
+               tokens += g->tokens_reserved;
+       }
+
+       idxd->nr_tokens = idxd->max_tokens - tokens;
+}
+
+static ssize_t group_tokens_reserved_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+
+       return sprintf(buf, "%u\n", group->tokens_reserved);
+}
+
+static ssize_t group_tokens_reserved_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       struct idxd_device *idxd = group->idxd;
+       unsigned long val;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (idxd->token_limit == 0)
+               return -EPERM;
+
+       if (val > idxd->max_tokens)
+               return -EINVAL;
+
+       if (val > idxd->nr_tokens)
+               return -EINVAL;
+
+       group->tokens_reserved = val;
+       idxd_set_free_tokens(idxd);
+       return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_reserved =
+               __ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
+                      group_tokens_reserved_store);
+
+static ssize_t group_tokens_allowed_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+
+       return sprintf(buf, "%u\n", group->tokens_allowed);
+}
+
+static ssize_t group_tokens_allowed_store(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t count)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       struct idxd_device *idxd = group->idxd;
+       unsigned long val;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (idxd->token_limit == 0)
+               return -EPERM;
+       if (val < 4 * group->num_engines ||
+           val > group->tokens_reserved + idxd->nr_tokens)
+               return -EINVAL;
+
+       group->tokens_allowed = val;
+       return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_allowed =
+               __ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
+                      group_tokens_allowed_store);
+
+static ssize_t group_use_token_limit_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+
+       return sprintf(buf, "%u\n", group->use_token_limit);
+}
+
+static ssize_t group_use_token_limit_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       struct idxd_device *idxd = group->idxd;
+       unsigned long val;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (idxd->token_limit == 0)
+               return -EPERM;
+
+       group->use_token_limit = !!val;
+       return count;
+}
+
+static struct device_attribute dev_attr_group_use_token_limit =
+               __ATTR(use_token_limit, 0644, group_use_token_limit_show,
+                      group_use_token_limit_store);
+
+static ssize_t group_engines_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       int i, rc = 0;
+       char *tmp = buf;
+       struct idxd_device *idxd = group->idxd;
+
+       for (i = 0; i < idxd->max_engines; i++) {
+               struct idxd_engine *engine = &idxd->engines[i];
+
+               if (!engine->group)
+                       continue;
+
+               if (engine->group->id == group->id)
+                       rc += sprintf(tmp + rc, "engine%d.%d ",
+                                       idxd->id, engine->id);
+       }
+
+       if (rc)
+               rc--;   /* drop the trailing space */
+       rc += sprintf(tmp + rc, "\n");
+
+       return rc;
+}
+
+static struct device_attribute dev_attr_group_engines =
+               __ATTR(engines, 0444, group_engines_show, NULL);
+
+static ssize_t group_work_queues_show(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       int i, rc = 0;
+       char *tmp = buf;
+       struct idxd_device *idxd = group->idxd;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               if (!wq->group)
+                       continue;
+
+               if (wq->group->id == group->id)
+                       rc += sprintf(tmp + rc, "wq%d.%d ",
+                                       idxd->id, wq->id);
+       }
+
+       if (rc)
+               rc--;   /* drop the trailing space */
+       rc += sprintf(tmp + rc, "\n");
+
+       return rc;
+}
+
+static struct device_attribute dev_attr_group_work_queues =
+               __ATTR(work_queues, 0444, group_work_queues_show, NULL);
+
+static ssize_t group_traffic_class_a_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+
+       return sprintf(buf, "%d\n", group->tc_a);
+}
+
+static ssize_t group_traffic_class_a_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       struct idxd_device *idxd = group->idxd;
+       long val;
+       int rc;
+
+       rc = kstrtol(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (val < 0 || val > 7)
+               return -EINVAL;
+
+       group->tc_a = val;
+       return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_a =
+               __ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
+                      group_traffic_class_a_store);
+
+static ssize_t group_traffic_class_b_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+
+       return sprintf(buf, "%d\n", group->tc_b);
+}
+
+static ssize_t group_traffic_class_b_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       struct idxd_device *idxd = group->idxd;
+       long val;
+       int rc;
+
+       rc = kstrtol(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (val < 0 || val > 7)
+               return -EINVAL;
+
+       group->tc_b = val;
+       return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_b =
+               __ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
+                      group_traffic_class_b_store);
+
+static struct attribute *idxd_group_attributes[] = {
+       &dev_attr_group_work_queues.attr,
+       &dev_attr_group_engines.attr,
+       &dev_attr_group_use_token_limit.attr,
+       &dev_attr_group_tokens_allowed.attr,
+       &dev_attr_group_tokens_reserved.attr,
+       &dev_attr_group_traffic_class_a.attr,
+       &dev_attr_group_traffic_class_b.attr,
+       NULL,
+};
+
+static const struct attribute_group idxd_group_attribute_group = {
+       .attrs = idxd_group_attributes,
+};
+
+static const struct attribute_group *idxd_group_attribute_groups[] = {
+       &idxd_group_attribute_group,
+       NULL,
+};
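+
+/*
+ * Sketch of how the token knobs above are intended to be used together,
+ * assuming a group exposed as group0.0 (paths and values illustrative):
+ *
+ *	echo 1 > .../group0.0/use_token_limit
+ *	echo 8 > .../group0.0/tokens_reserved	# must not exceed nr_tokens
+ *	echo 16 > .../group0.0/tokens_allowed	# at least 4 * num_engines
+ */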
+
+/* IDXD work queue attribs */
+static ssize_t wq_clients_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%d\n", wq->client_count);
+}
+
+static struct device_attribute dev_attr_wq_clients =
+               __ATTR(clients, 0444, wq_clients_show, NULL);
+
+static ssize_t wq_state_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       switch (wq->state) {
+       case IDXD_WQ_DISABLED:
+               return sprintf(buf, "disabled\n");
+       case IDXD_WQ_ENABLED:
+               return sprintf(buf, "enabled\n");
+       }
+
+       return sprintf(buf, "unknown\n");
+}
+
+static struct device_attribute dev_attr_wq_state =
+               __ATTR(state, 0444, wq_state_show, NULL);
+
+static ssize_t wq_group_id_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       if (wq->group)
+               return sprintf(buf, "%u\n", wq->group->id);
+       else
+               return sprintf(buf, "-1\n");
+}
+
+static ssize_t wq_group_id_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+       struct idxd_device *idxd = wq->idxd;
+       long id;
+       int rc;
+       struct idxd_group *prevg, *group;
+
+       rc = kstrtol(buf, 10, &id);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       if (id > idxd->max_groups - 1 || id < -1)
+               return -EINVAL;
+
+       if (id == -1) {
+               if (wq->group) {
+                       wq->group->num_wqs--;
+                       wq->group = NULL;
+               }
+               return count;
+       }
+
+       group = &idxd->groups[id];
+       prevg = wq->group;
+
+       if (prevg)
+               prevg->num_wqs--;
+       wq->group = group;
+       group->num_wqs++;
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_group_id =
+               __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
+
+static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%s\n",
+                       wq_dedicated(wq) ? "dedicated" : "shared");
+}
+
+static ssize_t wq_mode_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+       struct idxd_device *idxd = wq->idxd;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       if (sysfs_streq(buf, "dedicated")) {
+               set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+               wq->threshold = 0;
+       } else {
+               return -EINVAL;
+       }
+
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_mode =
+               __ATTR(mode, 0644, wq_mode_show, wq_mode_store);
+
+static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%u\n", wq->size);
+}
+
+static ssize_t wq_size_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+       unsigned long size;
+       struct idxd_device *idxd = wq->idxd;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &size);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       if (size > idxd->max_wq_size)
+               return -EINVAL;
+
+       wq->size = size;
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_size =
+               __ATTR(size, 0644, wq_size_show, wq_size_store);
+
+static ssize_t wq_priority_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%u\n", wq->priority);
+}
+
+static ssize_t wq_priority_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+       unsigned long prio;
+       struct idxd_device *idxd = wq->idxd;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &prio);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       if (prio > IDXD_MAX_PRIORITY)
+               return -EINVAL;
+
+       wq->priority = prio;
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_priority =
+               __ATTR(priority, 0644, wq_priority_show, wq_priority_store);
+
+static ssize_t wq_type_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       switch (wq->type) {
+       case IDXD_WQT_KERNEL:
+               return sprintf(buf, "%s\n",
+                              idxd_wq_type_names[IDXD_WQT_KERNEL]);
+       case IDXD_WQT_USER:
+               return sprintf(buf, "%s\n",
+                              idxd_wq_type_names[IDXD_WQT_USER]);
+       case IDXD_WQT_NONE:
+       default:
+               return sprintf(buf, "%s\n",
+                              idxd_wq_type_names[IDXD_WQT_NONE]);
+       }
+}
+
+static ssize_t wq_type_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+       enum idxd_wq_type old_type;
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       old_type = wq->type;
+       if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+               wq->type = IDXD_WQT_KERNEL;
+       else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
+               wq->type = IDXD_WQT_USER;
+       else
+               wq->type = IDXD_WQT_NONE;
+
+       /* If we are changing queue type, clear the name */
+       if (wq->type != old_type)
+               memset(wq->name, 0, WQ_NAME_SIZE + 1);
+
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_type =
+               __ATTR(type, 0644, wq_type_show, wq_type_store);
+
+static ssize_t wq_name_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%s\n", wq->name);
+}
+
+static ssize_t wq_name_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
+               return -EINVAL;
+
+       memset(wq->name, 0, WQ_NAME_SIZE + 1);
+       strncpy(wq->name, buf, WQ_NAME_SIZE);
+       strreplace(wq->name, '\n', '\0');
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_name =
+               __ATTR(name, 0644, wq_name_show, wq_name_store);
+
+static ssize_t wq_cdev_minor_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
+}
+
+static struct device_attribute dev_attr_wq_cdev_minor =
+               __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
+
+static struct attribute *idxd_wq_attributes[] = {
+       &dev_attr_wq_clients.attr,
+       &dev_attr_wq_state.attr,
+       &dev_attr_wq_group_id.attr,
+       &dev_attr_wq_mode.attr,
+       &dev_attr_wq_size.attr,
+       &dev_attr_wq_priority.attr,
+       &dev_attr_wq_type.attr,
+       &dev_attr_wq_name.attr,
+       &dev_attr_wq_cdev_minor.attr,
+       NULL,
+};
+
+static const struct attribute_group idxd_wq_attribute_group = {
+       .attrs = idxd_wq_attributes,
+};
+
+static const struct attribute_group *idxd_wq_attribute_groups[] = {
+       &idxd_wq_attribute_group,
+       NULL,
+};
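+
+/*
+ * Typical wq configuration sequence implied by the attributes above;
+ * the store handlers only accept writes while the wq is disabled
+ * (values and paths illustrative):
+ *
+ *	echo 0 > .../wq0.0/group_id
+ *	echo dedicated > .../wq0.0/mode
+ *	echo 16 > .../wq0.0/size
+ *	echo 10 > .../wq0.0/priority
+ *	echo user > .../wq0.0/type
+ *	echo app0 > .../wq0.0/name
+ */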
+
+/* IDXD device attribs */
+static ssize_t max_work_queues_size_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_wq_size);
+}
+static DEVICE_ATTR_RO(max_work_queues_size);
+
+static ssize_t max_groups_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_groups);
+}
+static DEVICE_ATTR_RO(max_groups);
+
+static ssize_t max_work_queues_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_wqs);
+}
+static DEVICE_ATTR_RO(max_work_queues);
+
+static ssize_t max_engines_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_engines);
+}
+static DEVICE_ATTR_RO(max_engines);
+
+static ssize_t numa_node_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
+}
+static DEVICE_ATTR_RO(numa_node);
+
+static ssize_t max_batch_size_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_batch_size);
+}
+static DEVICE_ATTR_RO(max_batch_size);
+
+static ssize_t max_transfer_size_show(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
+}
+static DEVICE_ATTR_RO(max_transfer_size);
+
+static ssize_t op_cap_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+}
+static DEVICE_ATTR_RO(op_cap);
+
+static ssize_t configurable_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n",
+                       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
+}
+static DEVICE_ATTR_RO(configurable);
+
+static ssize_t clients_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+       unsigned long flags;
+       int count = 0, i;
+
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               count += wq->client_count;
+       }
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+       return sprintf(buf, "%d\n", count);
+}
+static DEVICE_ATTR_RO(clients);
+
+static ssize_t state_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       switch (idxd->state) {
+       case IDXD_DEV_DISABLED:
+       case IDXD_DEV_CONF_READY:
+               return sprintf(buf, "disabled\n");
+       case IDXD_DEV_ENABLED:
+               return sprintf(buf, "enabled\n");
+       case IDXD_DEV_HALTED:
+               return sprintf(buf, "halted\n");
+       }
+
+       return sprintf(buf, "unknown\n");
+}
+static DEVICE_ATTR_RO(state);
+
+static ssize_t errors_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+       int i, out = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       for (i = 0; i < 4; i++)
+               out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+       out--;
+       out += sprintf(buf + out, "\n");
+       return out;
+}
+static DEVICE_ATTR_RO(errors);
+
+static ssize_t max_tokens_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_tokens);
+}
+static DEVICE_ATTR_RO(max_tokens);
+
+static ssize_t token_limit_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->token_limit);
+}
+
+static ssize_t token_limit_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+       unsigned long val;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (!idxd->hw.group_cap.token_limit)
+               return -EPERM;
+
+       if (val > idxd->hw.group_cap.total_tokens)
+               return -EINVAL;
+
+       idxd->token_limit = val;
+       return count;
+}
+static DEVICE_ATTR_RW(token_limit);
+
+static ssize_t cdev_major_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->major);
+}
+static DEVICE_ATTR_RO(cdev_major);
+
+static struct attribute *idxd_device_attributes[] = {
+       &dev_attr_max_groups.attr,
+       &dev_attr_max_work_queues.attr,
+       &dev_attr_max_work_queues_size.attr,
+       &dev_attr_max_engines.attr,
+       &dev_attr_numa_node.attr,
+       &dev_attr_max_batch_size.attr,
+       &dev_attr_max_transfer_size.attr,
+       &dev_attr_op_cap.attr,
+       &dev_attr_configurable.attr,
+       &dev_attr_clients.attr,
+       &dev_attr_state.attr,
+       &dev_attr_errors.attr,
+       &dev_attr_max_tokens.attr,
+       &dev_attr_token_limit.attr,
+       &dev_attr_cdev_major.attr,
+       NULL,
+};
+
+static const struct attribute_group idxd_device_attribute_group = {
+       .attrs = idxd_device_attributes,
+};
+
+static const struct attribute_group *idxd_attribute_groups[] = {
+       &idxd_device_attribute_group,
+       NULL,
+};
+
+static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int i, rc;
+
+       for (i = 0; i < idxd->max_engines; i++) {
+               struct idxd_engine *engine = &idxd->engines[i];
+
+               engine->conf_dev.parent = &idxd->conf_dev;
+               dev_set_name(&engine->conf_dev, "engine%d.%d",
+                            idxd->id, engine->id);
+               engine->conf_dev.bus = idxd_get_bus_type(idxd);
+               engine->conf_dev.groups = idxd_engine_attribute_groups;
+               engine->conf_dev.type = &idxd_engine_device_type;
+               dev_dbg(dev, "Engine device register: %s\n",
+                       dev_name(&engine->conf_dev));
+               rc = device_register(&engine->conf_dev);
+               if (rc < 0) {
+                       put_device(&engine->conf_dev);
+                       goto cleanup;
+               }
+       }
+
+       return 0;
+
+cleanup:
+       while (i--) {
+               struct idxd_engine *engine = &idxd->engines[i];
+
+               device_unregister(&engine->conf_dev);
+       }
+       return rc;
+}
+
+static int idxd_setup_group_sysfs(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int i, rc;
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *group = &idxd->groups[i];
+
+               group->conf_dev.parent = &idxd->conf_dev;
+               dev_set_name(&group->conf_dev, "group%d.%d",
+                            idxd->id, group->id);
+               group->conf_dev.bus = idxd_get_bus_type(idxd);
+               group->conf_dev.groups = idxd_group_attribute_groups;
+               group->conf_dev.type = &idxd_group_device_type;
+               dev_dbg(dev, "Group device register: %s\n",
+                       dev_name(&group->conf_dev));
+               rc = device_register(&group->conf_dev);
+               if (rc < 0) {
+                       put_device(&group->conf_dev);
+                       goto cleanup;
+               }
+       }
+
+       return 0;
+
+cleanup:
+       while (i--) {
+               struct idxd_group *group = &idxd->groups[i];
+
+               device_unregister(&group->conf_dev);
+       }
+       return rc;
+}
+
+static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int i, rc;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               wq->conf_dev.parent = &idxd->conf_dev;
+               dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+               wq->conf_dev.bus = idxd_get_bus_type(idxd);
+               wq->conf_dev.groups = idxd_wq_attribute_groups;
+               wq->conf_dev.type = &idxd_wq_device_type;
+               dev_dbg(dev, "WQ device register: %s\n",
+                       dev_name(&wq->conf_dev));
+               rc = device_register(&wq->conf_dev);
+               if (rc < 0) {
+                       put_device(&wq->conf_dev);
+                       goto cleanup;
+               }
+       }
+
+       return 0;
+
+cleanup:
+       while (i--) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               device_unregister(&wq->conf_dev);
+       }
+       return rc;
+}
+
+static int idxd_setup_device_sysfs(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int rc;
+       char devname[IDXD_NAME_SIZE];
+
+       sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
+       idxd->conf_dev.parent = dev;
+       dev_set_name(&idxd->conf_dev, "%s", devname);
+       idxd->conf_dev.bus = idxd_get_bus_type(idxd);
+       idxd->conf_dev.groups = idxd_attribute_groups;
+       idxd->conf_dev.type = idxd_get_device_type(idxd);
+
+       dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
+       rc = device_register(&idxd->conf_dev);
+       if (rc < 0) {
+               put_device(&idxd->conf_dev);
+               return rc;
+       }
+
+       return 0;
+}
+
+int idxd_setup_sysfs(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int rc;
+
+       rc = idxd_setup_device_sysfs(idxd);
+       if (rc < 0) {
+               dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
+               return rc;
+       }
+
+       rc = idxd_setup_wq_sysfs(idxd);
+       if (rc < 0) {
+               /* unregister conf dev */
+               dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
+               return rc;
+       }
+
+       rc = idxd_setup_group_sysfs(idxd);
+       if (rc < 0) {
+               /* unregister conf dev */
+               dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
+               return rc;
+       }
+
+       rc = idxd_setup_engine_sysfs(idxd);
+       if (rc < 0) {
+               /* unregister conf dev */
+               dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+void idxd_cleanup_sysfs(struct idxd_device *idxd)
+{
+       int i;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               device_unregister(&wq->conf_dev);
+       }
+
+       for (i = 0; i < idxd->max_engines; i++) {
+               struct idxd_engine *engine = &idxd->engines[i];
+
+               device_unregister(&engine->conf_dev);
+       }
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *group = &idxd->groups[i];
+
+               device_unregister(&group->conf_dev);
+       }
+
+       device_unregister(&idxd->conf_dev);
+}
+
+int idxd_register_bus_type(void)
+{
+       int i, rc;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++) {
+               rc = bus_register(idxd_bus_types[i]);
+               if (rc < 0)
+                       goto bus_err;
+       }
+
+       return 0;
+
+bus_err:
+       while (--i >= 0)
+               bus_unregister(idxd_bus_types[i]);
+       return rc;
+}
+
+void idxd_unregister_bus_type(void)
+{
+       int i;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++)
+               bus_unregister(idxd_bus_types[i]);
+}
index c27e206..066b21a 100644 (file)
@@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
                return;
        }
        sdmac->desc = desc = to_sdma_desc(&vd->tx);
-       /*
-        * Do not delete the node in desc_issued list in cyclic mode, otherwise
-        * the desc allocated will never be freed in vchan_dma_desc_free_list
-        */
-       if (!(sdmac->flags & IMX_DMA_SG_LOOP))
-               list_del(&vd->node);
+
+       list_del(&vd->node);
 
        sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
        sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
@@ -1071,20 +1067,27 @@ static void sdma_channel_terminate_work(struct work_struct *work)
 
        spin_lock_irqsave(&sdmac->vc.lock, flags);
        vchan_get_all_descriptors(&sdmac->vc, &head);
-       sdmac->desc = NULL;
        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
        vchan_dma_desc_free_list(&sdmac->vc, &head);
        sdmac->context_loaded = false;
 }
 
-static int sdma_disable_channel_async(struct dma_chan *chan)
+static int sdma_terminate_all(struct dma_chan *chan)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&sdmac->vc.lock, flags);
 
        sdma_disable_channel(chan);
 
-       if (sdmac->desc)
+       if (sdmac->desc) {
+               vchan_terminate_vdesc(&sdmac->desc->vd);
+               sdmac->desc = NULL;
                schedule_work(&sdmac->terminate_worker);
+       }
+
+       spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
        return 0;
 }
@@ -1324,7 +1327,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
 
-       sdma_disable_channel_async(chan);
+       sdma_terminate_all(chan);
 
        sdma_channel_synchronize(chan);
 
@@ -1648,7 +1651,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
                                      struct dma_tx_state *txstate)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
-       struct sdma_desc *desc;
+       struct sdma_desc *desc = NULL;
        u32 residue;
        struct virt_dma_desc *vd;
        enum dma_status ret;
@@ -1659,19 +1662,23 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
                return ret;
 
        spin_lock_irqsave(&sdmac->vc.lock, flags);
+
        vd = vchan_find_desc(&sdmac->vc, cookie);
-       if (vd) {
+       if (vd)
                desc = to_sdma_desc(&vd->tx);
+       else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
+               desc = sdmac->desc;
+
+       if (desc) {
                if (sdmac->flags & IMX_DMA_SG_LOOP)
                        residue = (desc->num_bd - desc->buf_ptail) *
                                desc->period_len - desc->chn_real_count;
                else
                        residue = desc->chn_count - desc->chn_real_count;
-       } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
-               residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
        } else {
                residue = 0;
        }
+
        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
        dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
@@ -2103,7 +2110,7 @@ static int sdma_probe(struct platform_device *pdev)
        sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
        sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
        sdma->dma_device.device_config = sdma_config;
-       sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+       sdma->dma_device.device_terminate_all = sdma_terminate_all;
        sdma->dma_device.device_synchronize = sdma_channel_synchronize;
        sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
        sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
index a6a6dc4..60e9afb 100644 (file)
@@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
        ioat_kobject_del(ioat_dma);
 
        dma_async_device_unregister(dma);
-
-       dma_pool_destroy(ioat_dma->completion_pool);
-
-       INIT_LIST_HEAD(&dma->channels);
 }
 
 /**
@@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
        dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
 
        for (i = 0; i < dma->chancnt; i++) {
-               ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+               ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
                if (!ioat_chan)
                        break;
 
@@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c)
                return;
 
        ioat_stop(ioat_chan);
-       ioat_reset_hw(ioat_chan);
 
-       /* Put LTR to idle */
-       if (ioat_dma->version >= IOAT_VER_3_4)
-               writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
-                       ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+       if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
+               ioat_reset_hw(ioat_chan);
+
+               /* Put LTR to idle */
+               if (ioat_dma->version >= IOAT_VER_3_4)
+                       writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+                              ioat_chan->reg_base +
+                              IOAT_CHAN_LTR_SWSEL_OFFSET);
+       }
 
        spin_lock_bh(&ioat_chan->cleanup_lock);
        spin_lock_bh(&ioat_chan->prep_lock);
@@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = {
        .err_handler    = &ioat_err_handler,
 };
 
+static void release_ioatdma(struct dma_device *device)
+{
+       struct ioatdma_device *d = to_ioatdma_device(device);
+       int i;
+
+       for (i = 0; i < IOAT_MAX_CHANS; i++)
+               kfree(d->idx[i]);
+
+       dma_pool_destroy(d->completion_pool);
+       kfree(d);
+}
+
 static struct ioatdma_device *
 alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
 {
-       struct device *dev = &pdev->dev;
-       struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+       struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
 
        if (!d)
                return NULL;
        d->pdev = pdev;
        d->reg_base = iobase;
+       d->dma_dev.device_release = release_ioatdma;
        return d;
 }
 
@@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev)
        if (!device)
                return;
 
+       ioat_shutdown(pdev);
+
        dev_err(&pdev->dev, "Removing dma and dca services\n");
        if (device->dca) {
                unregister_dca_provider(device->dca, &pdev->dev);
index c20e6bd..29f1223 100644 (file)
@@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
 
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
-       vchan_dma_desc_free_list(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&c->vc, &head);
+
        return 0;
 }
 
index c2d779d..b2c2b5e 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/of.h>
 #include <linux/of_dma.h>
 
+#include "dmaengine.h"
+
 static LIST_HEAD(of_dma_list);
 static DEFINE_MUTEX(of_dma_lock);
 
index 023f951..c683051 100644 (file)
@@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
        }
 
        vchan_get_all_descriptors(&vchan->vc, &head);
-       vchan_dma_desc_free_list(&vchan->vc, &head);
 
        spin_unlock_irqrestore(&vchan->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&vchan->vc, &head);
+
        return 0;
 }
 
index 6cce9ef..88b884c 100644 (file)
@@ -2961,12 +2961,7 @@ static int __maybe_unused pl330_suspend(struct device *dev)
 {
        struct amba_device *pcdev = to_amba_device(dev);
 
-       pm_runtime_disable(dev);
-
-       if (!pm_runtime_status_suspended(dev)) {
-               /* amba did not disable the clock */
-               amba_pclk_disable(pcdev);
-       }
+       pm_runtime_force_suspend(dev);
        amba_pclk_unprepare(pcdev);
 
        return 0;
@@ -2981,15 +2976,14 @@ static int __maybe_unused pl330_resume(struct device *dev)
        if (ret)
                return ret;
 
-       if (!pm_runtime_status_suspended(dev))
-               ret = amba_pclk_enable(pcdev);
-
-       pm_runtime_enable(dev);
+       pm_runtime_force_resume(dev);
 
        return ret;
 }
 
-static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
+static const struct dev_pm_ops pl330_pm = {
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
+};
 
 static int
 pl330_probe(struct amba_device *adev, const struct amba_id *id)
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
new file mode 100644 (file)
index 0000000..db4c5fd
--- /dev/null
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLX ExpressLane PEX PCI Switch DMA Engine Driver
+ * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
+ * Copyright (c) 2019, GigaIO Networks, Inc
+ */
+
+#include "dmaengine.h"
+
+#include <linux/circ_buf.h>
+#include <linux/dmaengine.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Logan Gunthorpe");
+
+#define PLX_REG_DESC_RING_ADDR                 0x214
+#define PLX_REG_DESC_RING_ADDR_HI              0x218
+#define PLX_REG_DESC_RING_NEXT_ADDR            0x21C
+#define PLX_REG_DESC_RING_COUNT                        0x220
+#define PLX_REG_DESC_RING_LAST_ADDR            0x224
+#define PLX_REG_DESC_RING_LAST_SIZE            0x228
+#define PLX_REG_PREF_LIMIT                     0x234
+#define PLX_REG_CTRL                           0x238
+#define PLX_REG_CTRL2                          0x23A
+#define PLX_REG_INTR_CTRL                      0x23C
+#define PLX_REG_INTR_STATUS                    0x23E
+
+#define PLX_REG_PREF_LIMIT_PREF_FOUR           8
+
+#define PLX_REG_CTRL_GRACEFUL_PAUSE            BIT(0)
+#define PLX_REG_CTRL_ABORT                     BIT(1)
+#define PLX_REG_CTRL_WRITE_BACK_EN             BIT(2)
+#define PLX_REG_CTRL_START                     BIT(3)
+#define PLX_REG_CTRL_RING_STOP_MODE            BIT(4)
+#define PLX_REG_CTRL_DESC_MODE_BLOCK           (0 << 5)
+#define PLX_REG_CTRL_DESC_MODE_ON_CHIP         (1 << 5)
+#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP                (2 << 5)
+#define PLX_REG_CTRL_DESC_INVALID              BIT(8)
+#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE       BIT(9)
+#define PLX_REG_CTRL_ABORT_DONE                        BIT(10)
+#define PLX_REG_CTRL_IMM_PAUSE_DONE            BIT(12)
+#define PLX_REG_CTRL_IN_PROGRESS               BIT(30)
+
+#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \
+                                PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
+                                PLX_REG_CTRL_ABORT_DONE | \
+                                PLX_REG_CTRL_IMM_PAUSE_DONE)
+
+#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \
+                                PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
+                                PLX_REG_CTRL_START | \
+                                PLX_REG_CTRL_RESET_VAL)
+
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B                0
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B       1
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B       2
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B       3
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB                4
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB                5
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B         7
+
+#define PLX_REG_INTR_CRTL_ERROR_EN             BIT(0)
+#define PLX_REG_INTR_CRTL_INV_DESC_EN          BIT(1)
+#define PLX_REG_INTR_CRTL_ABORT_DONE_EN                BIT(3)
+#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN                BIT(4)
+#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN    BIT(5)
+
+#define PLX_REG_INTR_STATUS_ERROR              BIT(0)
+#define PLX_REG_INTR_STATUS_INV_DESC           BIT(1)
+#define PLX_REG_INTR_STATUS_DESC_DONE          BIT(2)
+#define PLX_REG_INTR_CRTL_ABORT_DONE           BIT(3)
+
+struct plx_dma_hw_std_desc {
+       __le32 flags_and_size;
+       __le16 dst_addr_hi;
+       __le16 src_addr_hi;
+       __le32 dst_addr_lo;
+       __le32 src_addr_lo;
+};
+
+#define PLX_DESC_SIZE_MASK             0x7ffffff
+#define PLX_DESC_FLAG_VALID            BIT(31)
+#define PLX_DESC_FLAG_INT_WHEN_DONE    BIT(30)
+
+#define PLX_DESC_WB_SUCCESS            BIT(30)
+#define PLX_DESC_WB_RD_FAIL            BIT(29)
+#define PLX_DESC_WB_WR_FAIL            BIT(28)
+
+#define PLX_DMA_RING_COUNT             2048
+
+struct plx_dma_desc {
+       struct dma_async_tx_descriptor txd;
+       struct plx_dma_hw_std_desc *hw;
+       u32 orig_size;
+};
+
+struct plx_dma_dev {
+       struct dma_device dma_dev;
+       struct dma_chan dma_chan;
+       struct pci_dev __rcu *pdev;
+       void __iomem *bar;
+       struct tasklet_struct desc_task;
+
+       spinlock_t ring_lock;
+       bool ring_active;
+       int head;
+       int tail;
+       struct plx_dma_hw_std_desc *hw_ring;
+       dma_addr_t hw_ring_dma;
+       struct plx_dma_desc **desc_ring;
+};
+
+static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
+{
+       return container_of(c, struct plx_dma_dev, dma_chan);
+}
+
+static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
+{
+       return container_of(txd, struct plx_dma_desc, txd);
+}
+
+static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
+{
+       return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
+}
+
+static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
+{
+       struct dmaengine_result res;
+       struct plx_dma_desc *desc;
+       u32 flags;
+
+       spin_lock_bh(&plxdev->ring_lock);
+
+       while (plxdev->tail != plxdev->head) {
+               desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+               flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));
+
+               if (flags & PLX_DESC_FLAG_VALID)
+                       break;
+
+               res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);
+
+               if (flags & PLX_DESC_WB_SUCCESS)
+                       res.result = DMA_TRANS_NOERROR;
+               else if (flags & PLX_DESC_WB_WR_FAIL)
+                       res.result = DMA_TRANS_WRITE_FAILED;
+               else
+                       res.result = DMA_TRANS_READ_FAILED;
+
+               dma_cookie_complete(&desc->txd);
+               dma_descriptor_unmap(&desc->txd);
+               dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+               desc->txd.callback = NULL;
+               desc->txd.callback_result = NULL;
+
+               plxdev->tail++;
+       }
+
+       spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
+{
+       struct dmaengine_result res;
+       struct plx_dma_desc *desc;
+
+       plx_dma_process_desc(plxdev);
+
+       spin_lock_bh(&plxdev->ring_lock);
+
+       while (plxdev->tail != plxdev->head) {
+               desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+               res.residue = desc->orig_size;
+               res.result = DMA_TRANS_ABORTED;
+
+               dma_cookie_complete(&desc->txd);
+               dma_descriptor_unmap(&desc->txd);
+               dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+               desc->txd.callback = NULL;
+               desc->txd.callback_result = NULL;
+
+               plxdev->tail++;
+       }
+
+       spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void __plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+       u32 val;
+
+       val = readl(plxdev->bar + PLX_REG_CTRL);
+       if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
+               return;
+
+       writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+              plxdev->bar + PLX_REG_CTRL);
+
+       while (!time_after(jiffies, timeout)) {
+               val = readl(plxdev->bar + PLX_REG_CTRL);
+               if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
+                       break;
+
+               cpu_relax();
+       }
+
+       if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
+               dev_err(plxdev->dma_dev.dev,
+                       "Timeout waiting for graceful pause!\n");
+
+       writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+              plxdev->bar + PLX_REG_CTRL);
+
+       writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+       writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
+       writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+       writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+}
+
+static void plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+       rcu_read_lock();
+       if (!rcu_dereference(plxdev->pdev)) {
+               rcu_read_unlock();
+               return;
+       }
+
+       __plx_dma_stop(plxdev);
+
+       rcu_read_unlock();
+}
+
+static void plx_dma_desc_task(unsigned long data)
+{
+       struct plx_dma_dev *plxdev = (void *)data;
+
+       plx_dma_process_desc(plxdev);
+}
+
+static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
+               dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
+               unsigned long flags)
+       __acquires(plxdev->ring_lock)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
+       struct plx_dma_desc *plxdesc;
+
+       spin_lock_bh(&plxdev->ring_lock);
+       if (!plxdev->ring_active)
+               goto err_unlock;
+
+       if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
+               goto err_unlock;
+
+       if (len > PLX_DESC_SIZE_MASK)
+               goto err_unlock;
+
+       plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
+       plxdev->head++;
+
+       plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
+       plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
+       plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
+       plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));
+
+       plxdesc->orig_size = len;
+
+       if (flags & DMA_PREP_INTERRUPT)
+               len |= PLX_DESC_FLAG_INT_WHEN_DONE;
+
+       plxdesc->hw->flags_and_size = cpu_to_le32(len);
+       plxdesc->txd.flags = flags;
+
+       /* return with the lock held, it will be released in tx_submit */
+
+       return &plxdesc->txd;
+
+err_unlock:
+       /*
+        * Keep sparse happy by restoring an even lock count on
+        * this lock.
+        */
+       __acquire(plxdev->ring_lock);
+
+       spin_unlock_bh(&plxdev->ring_lock);
+       return NULL;
+}
+
+static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
+       __releases(plxdev->ring_lock)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
+       struct plx_dma_desc *plxdesc = to_plx_desc(desc);
+       dma_cookie_t cookie;
+
+       cookie = dma_cookie_assign(desc);
+
+       /*
+        * Ensure the descriptor updates are visible to the dma device
+        * before setting the valid bit.
+        */
+       wmb();
+
+       plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);
+
+       spin_unlock_bh(&plxdev->ring_lock);
+
+       return cookie;
+}
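/*
 * The ring lock is taken in plx_dma_prep_memcpy() and only released
 * here in tx_submit, so no other descriptor can be prepared between
 * the two calls. A dmaengine client never sees that handoff; a hedged
 * sketch of typical usage, assuming chan, dst, src and len were set up
 * elsewhere and with a hypothetical callback name:
 */
struct dma_async_tx_descriptor *txd;
dma_cookie_t cookie;

txd = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
if (txd) {
        txd->callback_result = my_memcpy_done;  /* hypothetical callback */
        cookie = dmaengine_submit(txd);         /* drops the ring lock */
        dma_async_issue_pending(chan);
}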
+
+static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
+               dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+       enum dma_status ret;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_COMPLETE)
+               return ret;
+
+       plx_dma_process_desc(plxdev);
+
+       return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void plx_dma_issue_pending(struct dma_chan *chan)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+
+       rcu_read_lock();
+       if (!rcu_dereference(plxdev->pdev)) {
+               rcu_read_unlock();
+               return;
+       }
+
+       /*
+        * Ensure the valid bits are visible before starting the
+        * DMA engine.
+        */
+       wmb();
+
+       writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);
+
+       rcu_read_unlock();
+}
+
+static irqreturn_t plx_dma_isr(int irq, void *devid)
+{
+       struct plx_dma_dev *plxdev = devid;
+       u32 status;
+
+       status = readw(plxdev->bar + PLX_REG_INTR_STATUS);
+
+       if (!status)
+               return IRQ_NONE;
+
+       if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
+               tasklet_schedule(&plxdev->desc_task);
+
+       writew(status, plxdev->bar + PLX_REG_INTR_STATUS);
+
+       return IRQ_HANDLED;
+}
+
+static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
+{
+       struct plx_dma_desc *desc;
+       int i;
+
+       plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
+                                   sizeof(*plxdev->desc_ring), GFP_KERNEL);
+       if (!plxdev->desc_ring)
+               return -ENOMEM;
+
+       for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
+               desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+               if (!desc)
+                       goto free_and_exit;
+
+               dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
+               desc->txd.tx_submit = plx_dma_tx_submit;
+               desc->hw = &plxdev->hw_ring[i];
+
+               plxdev->desc_ring[i] = desc;
+       }
+
+       return 0;
+
+free_and_exit:
+       for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+               kfree(plxdev->desc_ring[i]);
+       kfree(plxdev->desc_ring);
+       return -ENOMEM;
+}
+
+static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+       size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+       int rc;
+
+       plxdev->head = plxdev->tail = 0;
+       plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
+                                            &plxdev->hw_ring_dma, GFP_KERNEL);
+       if (!plxdev->hw_ring)
+               return -ENOMEM;
+
+       rc = plx_dma_alloc_desc(plxdev);
+       if (rc)
+               goto out_free_hw_ring;
+
+       rcu_read_lock();
+       if (!rcu_dereference(plxdev->pdev)) {
+               rcu_read_unlock();
+               rc = -ENODEV;
+               goto out_free_hw_ring;
+       }
+
+       writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
+       writel(lower_32_bits(plxdev->hw_ring_dma),
+              plxdev->bar + PLX_REG_DESC_RING_ADDR);
+       writel(upper_32_bits(plxdev->hw_ring_dma),
+              plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+       writel(lower_32_bits(plxdev->hw_ring_dma),
+              plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+       writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+       writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);
+
+       plxdev->ring_active = true;
+
+       rcu_read_unlock();
+
+       return PLX_DMA_RING_COUNT;
+
+out_free_hw_ring:
+       dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+                         plxdev->hw_ring_dma);
+       return rc;
+}
+
+static void plx_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+       size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+       struct pci_dev *pdev;
+       int irq = -1;
+       int i;
+
+       spin_lock_bh(&plxdev->ring_lock);
+       plxdev->ring_active = false;
+       spin_unlock_bh(&plxdev->ring_lock);
+
+       plx_dma_stop(plxdev);
+
+       rcu_read_lock();
+       pdev = rcu_dereference(plxdev->pdev);
+       if (pdev)
+               irq = pci_irq_vector(pdev, 0);
+       rcu_read_unlock();
+
+       if (irq > 0)
+               synchronize_irq(irq);
+
+       tasklet_kill(&plxdev->desc_task);
+
+       plx_dma_abort_desc(plxdev);
+
+       for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+               kfree(plxdev->desc_ring[i]);
+
+       kfree(plxdev->desc_ring);
+       dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+                         plxdev->hw_ring_dma);
+}
+
+static void plx_dma_release(struct dma_device *dma_dev)
+{
+       struct plx_dma_dev *plxdev =
+               container_of(dma_dev, struct plx_dma_dev, dma_dev);
+
+       put_device(dma_dev->dev);
+       kfree(plxdev);
+}
+
+static int plx_dma_create(struct pci_dev *pdev)
+{
+       struct plx_dma_dev *plxdev;
+       struct dma_device *dma;
+       struct dma_chan *chan;
+       int rc;
+
+       plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
+       if (!plxdev)
+               return -ENOMEM;
+
+       rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
+                        KBUILD_MODNAME, plxdev);
+       if (rc) {
+               kfree(plxdev);
+               return rc;
+       }
+
+       spin_lock_init(&plxdev->ring_lock);
+       tasklet_init(&plxdev->desc_task, plx_dma_desc_task,
+                    (unsigned long)plxdev);
+
+       RCU_INIT_POINTER(plxdev->pdev, pdev);
+       plxdev->bar = pcim_iomap_table(pdev)[0];
+
+       dma = &plxdev->dma_dev;
+       dma->chancnt = 1;
+       INIT_LIST_HEAD(&dma->channels);
+       dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+       dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
+       dma->dev = get_device(&pdev->dev);
+
+       dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
+       dma->device_free_chan_resources = plx_dma_free_chan_resources;
+       dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
+       dma->device_issue_pending = plx_dma_issue_pending;
+       dma->device_tx_status = plx_dma_tx_status;
+       dma->device_release = plx_dma_release;
+
+       chan = &plxdev->dma_chan;
+       chan->device = dma;
+       dma_cookie_init(chan);
+       list_add_tail(&chan->device_node, &dma->channels);
+
+       rc = dma_async_device_register(dma);
+       if (rc) {
+               pci_err(pdev, "Failed to register dma device: %d\n", rc);
+               free_irq(pci_irq_vector(pdev, 0), plxdev);
+               kfree(plxdev);
+               return rc;
+       }
+
+       pci_set_drvdata(pdev, plxdev);
+
+       return 0;
+}
+
+static int plx_dma_probe(struct pci_dev *pdev,
+                        const struct pci_device_id *id)
+{
+       int rc;
+
+       rc = pcim_enable_device(pdev);
+       if (rc)
+               return rc;
+
+       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+       if (rc)
+               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+
+       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+       if (rc)
+               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+
+       rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
+       if (rc)
+               return rc;
+
+       rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+       if (rc <= 0)
+               return rc;
+
+       pci_set_master(pdev);
+
+       rc = plx_dma_create(pdev);
+       if (rc)
+               goto err_free_irq_vectors;
+
+       pci_info(pdev, "PLX DMA Channel Registered\n");
+
+       return 0;
+
+err_free_irq_vectors:
+       pci_free_irq_vectors(pdev);
+       return rc;
+}
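/*
 * The mask setup above tries 48 bits first and falls back to 32; the
 * paired pci_set_dma_mask()/pci_set_consistent_dma_mask() calls can be
 * collapsed with the generic helper. A hedged equivalent sketch
 * (behaviour assumed identical for this device):
 */
int rc;

rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (rc)
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
        return rc;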
+
+static void plx_dma_remove(struct pci_dev *pdev)
+{
+       struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);
+
+       free_irq(pci_irq_vector(pdev, 0), plxdev);
+
+       rcu_assign_pointer(plxdev->pdev, NULL);
+       synchronize_rcu();
+
+       spin_lock_bh(&plxdev->ring_lock);
+       plxdev->ring_active = false;
+       spin_unlock_bh(&plxdev->ring_lock);
+
+       __plx_dma_stop(plxdev);
+       plx_dma_abort_desc(plxdev);
+
+       plxdev->bar = NULL;
+       dma_async_device_unregister(&plxdev->dma_dev);
+
+       pci_free_irq_vectors(pdev);
+}
+
+static const struct pci_device_id plx_dma_pci_tbl[] = {
+       {
+               .vendor         = PCI_VENDOR_ID_PLX,
+               .device         = 0x87D0,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .class          = PCI_CLASS_SYSTEM_OTHER << 8,
+               .class_mask     = 0xFFFFFFFF,
+       },
+       {0}
+};
+MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);
+
+static struct pci_driver plx_dma_pci_driver = {
+       .name           = KBUILD_MODNAME,
+       .id_table       = plx_dma_pci_tbl,
+       .probe          = plx_dma_probe,
+       .remove         = plx_dma_remove,
+};
+module_pci_driver(plx_dma_pci_driver);
index 43da8ee..8e14c72 100644 (file)
@@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
        s3c24xx_dma_start_next_sg(s3cchan, txd);
 }
 
-static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
-                               struct s3c24xx_dma_chan *s3cchan)
-{
-       LIST_HEAD(head);
-
-       vchan_get_all_descriptors(&s3cchan->vc, &head);
-       vchan_dma_desc_free_list(&s3cchan->vc, &head);
-}
-
 /*
  * Try to allocate a physical channel.  When successful, assign it to
  * this virtual channel, and initiate the next descriptor.  The
@@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
 {
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+       LIST_HEAD(head);
        unsigned long flags;
-       int ret = 0;
+       int ret;
 
        spin_lock_irqsave(&s3cchan->vc.lock, flags);
 
@@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
        }
 
        /* Dequeue jobs not yet fired as well */
-       s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+
+       vchan_get_all_descriptors(&s3cchan->vc, &head);
+
+       spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+       vchan_dma_desc_free_list(&s3cchan->vc, &head);
+
+       return 0;
+
 unlock:
        spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
 
@@ -1198,7 +1198,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
 
        /* Basic sanity check */
        if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
-               dev_err(&pdev->dev, "to many dma channels %d, max %d\n",
+               dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
                        pdata->num_phy_channels, MAX_DMA_CHANNELS);
                return -EINVAL;
        }
index 465256f..6d0bec9 100644 (file)
@@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
        kfree(chan->desc);
        chan->desc = NULL;
        vchan_get_all_descriptors(&chan->vchan, &head);
-       vchan_dma_desc_free_list(&chan->vchan, &head);
        sf_pdma_disclaim_chan(chan);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
+       vchan_dma_desc_free_list(&chan->vchan, &head);
 }
 
 static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
@@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan)
        chan->desc = NULL;
        chan->xfer_err = false;
        vchan_get_all_descriptors(&chan->vchan, &head);
-       vchan_dma_desc_free_list(&chan->vchan, &head);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
+       vchan_dma_desc_free_list(&chan->vchan, &head);
 
        return 0;
 }
index e397a50..bbc2bda 100644 (file)
@@ -669,43 +669,41 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
        dma_addr_t src, dest;
        u32 endpoints;
        int nr_periods, offset, plength, i;
+       u8 ram_type, io_mode, linear_mode;
 
        if (!is_slave_direction(dir)) {
                dev_err(chan2dev(chan), "Invalid DMA direction\n");
                return NULL;
        }
 
-       if (vchan->is_dedicated) {
-               /*
-                * As we are using this just for audio data, we need to use
-                * normal DMA. There is nothing stopping us from supporting
-                * dedicated DMA here as well, so if a client comes up and
-                * requires it, it will be simple to implement it.
-                */
-               dev_err(chan2dev(chan),
-                       "Cyclic transfers are only supported on Normal DMA\n");
-               return NULL;
-       }
-
        contract = generate_dma_contract();
        if (!contract)
                return NULL;
 
        contract->is_cyclic = 1;
 
-       /* Figure out the endpoints and the address we need */
+       if (vchan->is_dedicated) {
+               io_mode = SUN4I_DDMA_ADDR_MODE_IO;
+               linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
+               ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+       } else {
+               io_mode = SUN4I_NDMA_ADDR_MODE_IO;
+               linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
+               ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+       }
+
        if (dir == DMA_MEM_TO_DEV) {
                src = buf;
                dest = sconfig->dst_addr;
-               endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
-                           SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
-                           SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
+               endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+                           SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
+                           SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type);
        } else {
                src = sconfig->src_addr;
                dest = buf;
-               endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
-                           SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
-                           SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+               endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+                           SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+                           SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
        }
 
        /*
@@ -747,8 +745,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
                        dest = buf + offset;
 
                /* Make the promise */
-               promise = generate_ndma_promise(chan, src, dest,
-                                               plength, sconfig, dir);
+               if (vchan->is_dedicated)
+                       promise = generate_ddma_promise(chan, src, dest,
+                                                       plength, sconfig);
+               else
+                       promise = generate_ndma_promise(chan, src, dest,
+                                                       plength, sconfig, dir);
+
                if (!promise) {
                        /* TODO: should we free everything? */
                        return NULL;
@@ -885,12 +888,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan)
        }
 
        spin_lock_irqsave(&vchan->vc.lock, flags);
-       vchan_dma_desc_free_list(&vchan->vc, &head);
        /* Clear these so the vchan is usable again */
        vchan->processing = NULL;
        vchan->pchan = NULL;
        spin_unlock_irqrestore(&vchan->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&vchan->vc, &head);
+
        return 0;
 }
 
index d507c24..f76e066 100644 (file)
@@ -34,5 +34,29 @@ config DMA_OMAP
          Enable support for the TI sDMA (System DMA or DMA4) controller. This
          DMA engine is found on OMAP and DRA7xx parts.
 
+config TI_K3_UDMA
+       bool "Texas Instruments UDMA support"
+       depends on ARCH_K3 || COMPILE_TEST
+       depends on TI_SCI_PROTOCOL
+       depends on TI_SCI_INTA_IRQCHIP
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       select TI_K3_RINGACC
+       select TI_K3_PSIL
+       help
+         Enable support for the TI UDMA (Unified DMA) controller. This
+         DMA engine is used on TI's AM65x and J721E SoCs.
+
+config TI_K3_UDMA_GLUE_LAYER
+       bool "Texas Instruments UDMA Glue layer for non DMAengine users"
+       depends on ARCH_K3 || COMPILE_TEST
+       depends on TI_K3_UDMA
+       help
+         Say y here to support the K3 NAVSS DMA glue interface.
+         If unsure, say N.
+
+config TI_K3_PSIL
+       bool
+
 config TI_DMA_CROSSBAR
        bool
index 113e59e..9a29a10 100644 (file)
@@ -2,4 +2,7 @@
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o
+obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
+obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o
 obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
index 756a3c9..03a7f64 100644 (file)
@@ -2289,13 +2289,6 @@ static int edma_probe(struct platform_device *pdev)
        if (!info)
                return -ENODEV;
 
-       pm_runtime_enable(dev);
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0) {
-               dev_err(dev, "pm_runtime_get_sync() failed\n");
-               return ret;
-       }
-
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;
@@ -2326,27 +2319,33 @@ static int edma_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, ecc);
 
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0) {
+               dev_err(dev, "pm_runtime_get_sync() failed\n");
+               pm_runtime_disable(dev);
+               return ret;
+       }
+
        /* Get eDMA3 configuration from IP */
        ret = edma_setup_from_hw(dev, info, ecc);
        if (ret)
-               return ret;
+               goto err_disable_pm;
 
        /* Allocate memory based on the information we got from the IP */
        ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
                                        sizeof(*ecc->slave_chans), GFP_KERNEL);
-       if (!ecc->slave_chans)
-               return -ENOMEM;
 
        ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
                                       sizeof(unsigned long), GFP_KERNEL);
-       if (!ecc->slot_inuse)
-               return -ENOMEM;
 
        ecc->channels_mask = devm_kcalloc(dev,
                                           BITS_TO_LONGS(ecc->num_channels),
                                           sizeof(unsigned long), GFP_KERNEL);
-       if (!ecc->channels_mask)
-               return -ENOMEM;
+       if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
+               ret = -ENOMEM;
+               goto err_disable_pm;
+       }
 
        /* Mark all channels available initially */
        bitmap_fill(ecc->channels_mask, ecc->num_channels);
@@ -2388,7 +2387,7 @@ static int edma_probe(struct platform_device *pdev)
                                       ecc);
                if (ret) {
                        dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
-                       return ret;
+                       goto err_disable_pm;
                }
                ecc->ccint = irq;
        }
@@ -2404,7 +2403,7 @@ static int edma_probe(struct platform_device *pdev)
                                       ecc);
                if (ret) {
                        dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
-                       return ret;
+                       goto err_disable_pm;
                }
                ecc->ccerrint = irq;
        }
@@ -2412,7 +2411,8 @@ static int edma_probe(struct platform_device *pdev)
        ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
        if (ecc->dummy_slot < 0) {
                dev_err(dev, "Can't allocate PaRAM dummy slot\n");
-               return ecc->dummy_slot;
+               ret = ecc->dummy_slot;
+               goto err_disable_pm;
        }
 
        queue_priority_mapping = info->queue_priority_mapping;
@@ -2512,6 +2512,9 @@ static int edma_probe(struct platform_device *pdev)
 
 err_reg1:
        edma_free_slot(ecc, ecc->dummy_slot);
+err_disable_pm:
+       pm_runtime_put_sync(dev);
+       pm_runtime_disable(dev);
        return ret;
 }
 
@@ -2542,6 +2545,8 @@ static int edma_remove(struct platform_device *pdev)
        if (ecc->dma_memcpy)
                dma_async_device_unregister(ecc->dma_memcpy);
        edma_free_slot(ecc, ecc->dummy_slot);
+       pm_runtime_put_sync(dev);
+       pm_runtime_disable(dev);
 
        return 0;
 }
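/*
 * The edma_probe() reordering above keeps a simple unwind invariant:
 * nothing that needs manual cleanup runs before pm_runtime_enable(),
 * and every later failure funnels through err_disable_pm, which undoes
 * exactly the enable/get pair. In skeleton form (a sketch only;
 * example_hw_setup() is a hypothetical placeholder for any step that
 * can fail):
 */
static int example_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        int ret;

        /* devm-managed allocations first: freed automatically on failure */

        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                pm_runtime_disable(dev);
                return ret;
        }

        ret = example_hw_setup(pdev);   /* hypothetical setup step */
        if (ret)
                goto err_disable_pm;

        return 0;

err_disable_pm:
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
        return ret;
}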
diff --git a/drivers/dma/ti/k3-psil-am654.c b/drivers/dma/ti/k3-psil-am654.c
new file mode 100644 (file)
index 0000000..a896a15
--- /dev/null
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x)                             \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+               },                                      \
+       }
+
+#define PSIL_PDMA_XY_PKT(x)                            \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+                       .pkt_mode = 1,                  \
+               },                                      \
+       }
+
+#define PSIL_ETHERNET(x)                               \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_NATIVE,      \
+                       .pkt_mode = 1,                  \
+                       .needs_epib = 1,                \
+                       .psd_size = 16,                 \
+               },                                      \
+       }
+
+#define PSIL_SA2UL(x, tx)                              \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_NATIVE,      \
+                       .pkt_mode = 1,                  \
+                       .needs_epib = 1,                \
+                       .psd_size = 64,                 \
+                       .notdpkt = tx,                  \
+               },                                      \
+       }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am654_src_ep_map[] = {
+       /* SA2UL */
+       PSIL_SA2UL(0x4000, 0),
+       PSIL_SA2UL(0x4001, 0),
+       PSIL_SA2UL(0x4002, 0),
+       PSIL_SA2UL(0x4003, 0),
+       /* PRU_ICSSG0 */
+       PSIL_ETHERNET(0x4100),
+       PSIL_ETHERNET(0x4101),
+       PSIL_ETHERNET(0x4102),
+       PSIL_ETHERNET(0x4103),
+       /* PRU_ICSSG1 */
+       PSIL_ETHERNET(0x4200),
+       PSIL_ETHERNET(0x4201),
+       PSIL_ETHERNET(0x4202),
+       PSIL_ETHERNET(0x4203),
+       /* PRU_ICSSG2 */
+       PSIL_ETHERNET(0x4300),
+       PSIL_ETHERNET(0x4301),
+       PSIL_ETHERNET(0x4302),
+       PSIL_ETHERNET(0x4303),
+       /* PDMA0 - McASPs */
+       PSIL_PDMA_XY_TR(0x4400),
+       PSIL_PDMA_XY_TR(0x4401),
+       PSIL_PDMA_XY_TR(0x4402),
+       /* PDMA1 - SPI0-4 */
+       PSIL_PDMA_XY_PKT(0x4500),
+       PSIL_PDMA_XY_PKT(0x4501),
+       PSIL_PDMA_XY_PKT(0x4502),
+       PSIL_PDMA_XY_PKT(0x4503),
+       PSIL_PDMA_XY_PKT(0x4504),
+       PSIL_PDMA_XY_PKT(0x4505),
+       PSIL_PDMA_XY_PKT(0x4506),
+       PSIL_PDMA_XY_PKT(0x4507),
+       PSIL_PDMA_XY_PKT(0x4508),
+       PSIL_PDMA_XY_PKT(0x4509),
+       PSIL_PDMA_XY_PKT(0x450a),
+       PSIL_PDMA_XY_PKT(0x450b),
+       PSIL_PDMA_XY_PKT(0x450c),
+       PSIL_PDMA_XY_PKT(0x450d),
+       PSIL_PDMA_XY_PKT(0x450e),
+       PSIL_PDMA_XY_PKT(0x450f),
+       PSIL_PDMA_XY_PKT(0x4510),
+       PSIL_PDMA_XY_PKT(0x4511),
+       PSIL_PDMA_XY_PKT(0x4512),
+       PSIL_PDMA_XY_PKT(0x4513),
+       /* PDMA1 - USART0-2 */
+       PSIL_PDMA_XY_PKT(0x4514),
+       PSIL_PDMA_XY_PKT(0x4515),
+       PSIL_PDMA_XY_PKT(0x4516),
+       /* CPSW0 */
+       PSIL_ETHERNET(0x7000),
+       /* MCU_PDMA0 - ADCs */
+       PSIL_PDMA_XY_TR(0x7100),
+       PSIL_PDMA_XY_TR(0x7101),
+       PSIL_PDMA_XY_TR(0x7102),
+       PSIL_PDMA_XY_TR(0x7103),
+       /* MCU_PDMA1 - MCU_SPI0-2 */
+       PSIL_PDMA_XY_PKT(0x7200),
+       PSIL_PDMA_XY_PKT(0x7201),
+       PSIL_PDMA_XY_PKT(0x7202),
+       PSIL_PDMA_XY_PKT(0x7203),
+       PSIL_PDMA_XY_PKT(0x7204),
+       PSIL_PDMA_XY_PKT(0x7205),
+       PSIL_PDMA_XY_PKT(0x7206),
+       PSIL_PDMA_XY_PKT(0x7207),
+       PSIL_PDMA_XY_PKT(0x7208),
+       PSIL_PDMA_XY_PKT(0x7209),
+       PSIL_PDMA_XY_PKT(0x720a),
+       PSIL_PDMA_XY_PKT(0x720b),
+       /* MCU_PDMA1 - MCU_USART0 */
+       PSIL_PDMA_XY_PKT(0x7212),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am654_dst_ep_map[] = {
+       /* SA2UL */
+       PSIL_SA2UL(0xc000, 1),
+       PSIL_SA2UL(0xc001, 1),
+       /* PRU_ICSSG0 */
+       PSIL_ETHERNET(0xc100),
+       PSIL_ETHERNET(0xc101),
+       PSIL_ETHERNET(0xc102),
+       PSIL_ETHERNET(0xc103),
+       PSIL_ETHERNET(0xc104),
+       PSIL_ETHERNET(0xc105),
+       PSIL_ETHERNET(0xc106),
+       PSIL_ETHERNET(0xc107),
+       /* PRU_ICSSG1 */
+       PSIL_ETHERNET(0xc200),
+       PSIL_ETHERNET(0xc201),
+       PSIL_ETHERNET(0xc202),
+       PSIL_ETHERNET(0xc203),
+       PSIL_ETHERNET(0xc204),
+       PSIL_ETHERNET(0xc205),
+       PSIL_ETHERNET(0xc206),
+       PSIL_ETHERNET(0xc207),
+       /* PRU_ICSSG2 */
+       PSIL_ETHERNET(0xc300),
+       PSIL_ETHERNET(0xc301),
+       PSIL_ETHERNET(0xc302),
+       PSIL_ETHERNET(0xc303),
+       PSIL_ETHERNET(0xc304),
+       PSIL_ETHERNET(0xc305),
+       PSIL_ETHERNET(0xc306),
+       PSIL_ETHERNET(0xc307),
+       /* CPSW0 */
+       PSIL_ETHERNET(0xf000),
+       PSIL_ETHERNET(0xf001),
+       PSIL_ETHERNET(0xf002),
+       PSIL_ETHERNET(0xf003),
+       PSIL_ETHERNET(0xf004),
+       PSIL_ETHERNET(0xf005),
+       PSIL_ETHERNET(0xf006),
+       PSIL_ETHERNET(0xf007),
+};
+
+struct psil_ep_map am654_ep_map = {
+       .name = "am654",
+       .src = am654_src_ep_map,
+       .src_count = ARRAY_SIZE(am654_src_ep_map),
+       .dst = am654_dst_ep_map,
+       .dst_count = ARRAY_SIZE(am654_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
new file mode 100644 (file)
index 0000000..e3cfd5f
--- /dev/null
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x)                             \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+               },                                      \
+       }
+
+#define PSIL_PDMA_XY_PKT(x)                            \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+                       .pkt_mode = 1,                  \
+               },                                      \
+       }
+
+#define PSIL_PDMA_MCASP(x)                             \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+                       .pdma_acc32 = 1,                \
+                       .pdma_burst = 1,                \
+               },                                      \
+       }
+
+#define PSIL_ETHERNET(x)                               \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_NATIVE,      \
+                       .pkt_mode = 1,                  \
+                       .needs_epib = 1,                \
+                       .psd_size = 16,                 \
+               },                                      \
+       }
+
+#define PSIL_SA2UL(x, tx)                              \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_NATIVE,      \
+                       .pkt_mode = 1,                  \
+                       .needs_epib = 1,                \
+                       .psd_size = 64,                 \
+                       .notdpkt = tx,                  \
+               },                                      \
+       }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j721e_src_ep_map[] = {
+       /* SA2UL */
+       PSIL_SA2UL(0x4000, 0),
+       PSIL_SA2UL(0x4001, 0),
+       PSIL_SA2UL(0x4002, 0),
+       PSIL_SA2UL(0x4003, 0),
+       /* PRU_ICSSG0 */
+       PSIL_ETHERNET(0x4100),
+       PSIL_ETHERNET(0x4101),
+       PSIL_ETHERNET(0x4102),
+       PSIL_ETHERNET(0x4103),
+       /* PRU_ICSSG1 */
+       PSIL_ETHERNET(0x4200),
+       PSIL_ETHERNET(0x4201),
+       PSIL_ETHERNET(0x4202),
+       PSIL_ETHERNET(0x4203),
+       /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */
+       PSIL_PDMA_MCASP(0x4400),
+       PSIL_PDMA_MCASP(0x4401),
+       PSIL_PDMA_MCASP(0x4402),
+       /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */
+       PSIL_PDMA_MCASP(0x4500),
+       PSIL_PDMA_MCASP(0x4501),
+       PSIL_PDMA_MCASP(0x4502),
+       PSIL_PDMA_MCASP(0x4503),
+       PSIL_PDMA_MCASP(0x4504),
+       PSIL_PDMA_MCASP(0x4505),
+       PSIL_PDMA_MCASP(0x4506),
+       PSIL_PDMA_MCASP(0x4507),
+       PSIL_PDMA_MCASP(0x4508),
+       /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */
+       PSIL_PDMA_XY_PKT(0x4600),
+       PSIL_PDMA_XY_PKT(0x4601),
+       PSIL_PDMA_XY_PKT(0x4602),
+       PSIL_PDMA_XY_PKT(0x4603),
+       PSIL_PDMA_XY_PKT(0x4604),
+       PSIL_PDMA_XY_PKT(0x4605),
+       PSIL_PDMA_XY_PKT(0x4606),
+       PSIL_PDMA_XY_PKT(0x4607),
+       /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */
+       PSIL_PDMA_XY_PKT(0x460c),
+       PSIL_PDMA_XY_PKT(0x460d),
+       PSIL_PDMA_XY_PKT(0x460e),
+       PSIL_PDMA_XY_PKT(0x460f),
+       PSIL_PDMA_XY_PKT(0x4610),
+       PSIL_PDMA_XY_PKT(0x4611),
+       PSIL_PDMA_XY_PKT(0x4612),
+       PSIL_PDMA_XY_PKT(0x4613),
+       /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */
+       PSIL_PDMA_XY_PKT(0x4618),
+       PSIL_PDMA_XY_PKT(0x4619),
+       PSIL_PDMA_XY_PKT(0x461a),
+       PSIL_PDMA_XY_PKT(0x461b),
+       PSIL_PDMA_XY_PKT(0x461c),
+       PSIL_PDMA_XY_PKT(0x461d),
+       PSIL_PDMA_XY_PKT(0x461e),
+       PSIL_PDMA_XY_PKT(0x461f),
+       /* PDMA11 (PDMA_MISC_G3) */
+       PSIL_PDMA_XY_PKT(0x4624),
+       PSIL_PDMA_XY_PKT(0x4625),
+       PSIL_PDMA_XY_PKT(0x4626),
+       PSIL_PDMA_XY_PKT(0x4627),
+       PSIL_PDMA_XY_PKT(0x4628),
+       PSIL_PDMA_XY_PKT(0x4629),
+       PSIL_PDMA_XY_PKT(0x4630),
+       PSIL_PDMA_XY_PKT(0x463a),
+       /* PDMA13 (PDMA_USART_G0) - UART0-1 */
+       PSIL_PDMA_XY_PKT(0x4700),
+       PSIL_PDMA_XY_PKT(0x4701),
+       /* PDMA14 (PDMA_USART_G1) - UART2-3 */
+       PSIL_PDMA_XY_PKT(0x4702),
+       PSIL_PDMA_XY_PKT(0x4703),
+       /* PDMA15 (PDMA_USART_G2) - UART4-9 */
+       PSIL_PDMA_XY_PKT(0x4704),
+       PSIL_PDMA_XY_PKT(0x4705),
+       PSIL_PDMA_XY_PKT(0x4706),
+       PSIL_PDMA_XY_PKT(0x4707),
+       PSIL_PDMA_XY_PKT(0x4708),
+       PSIL_PDMA_XY_PKT(0x4709),
+       /* CPSW9 */
+       PSIL_ETHERNET(0x4a00),
+       /* CPSW0 */
+       PSIL_ETHERNET(0x7000),
+       /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+       PSIL_PDMA_XY_PKT(0x7100),
+       PSIL_PDMA_XY_PKT(0x7101),
+       PSIL_PDMA_XY_PKT(0x7102),
+       PSIL_PDMA_XY_PKT(0x7103),
+       /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+       PSIL_PDMA_XY_PKT(0x7200),
+       PSIL_PDMA_XY_PKT(0x7201),
+       PSIL_PDMA_XY_PKT(0x7202),
+       PSIL_PDMA_XY_PKT(0x7203),
+       PSIL_PDMA_XY_PKT(0x7204),
+       PSIL_PDMA_XY_PKT(0x7205),
+       PSIL_PDMA_XY_PKT(0x7206),
+       PSIL_PDMA_XY_PKT(0x7207),
+       /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+       PSIL_PDMA_XY_PKT(0x7300),
+       /* MCU_PDMA_ADC - ADC0-1 */
+       PSIL_PDMA_XY_TR(0x7400),
+       PSIL_PDMA_XY_TR(0x7401),
+       PSIL_PDMA_XY_TR(0x7402),
+       PSIL_PDMA_XY_TR(0x7403),
+       /* SA2UL */
+       PSIL_SA2UL(0x7500, 0),
+       PSIL_SA2UL(0x7501, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j721e_dst_ep_map[] = {
+       /* SA2UL */
+       PSIL_SA2UL(0xc000, 1),
+       PSIL_SA2UL(0xc001, 1),
+       /* PRU_ICSSG0 */
+       PSIL_ETHERNET(0xc100),
+       PSIL_ETHERNET(0xc101),
+       PSIL_ETHERNET(0xc102),
+       PSIL_ETHERNET(0xc103),
+       PSIL_ETHERNET(0xc104),
+       PSIL_ETHERNET(0xc105),
+       PSIL_ETHERNET(0xc106),
+       PSIL_ETHERNET(0xc107),
+       /* PRU_ICSSG1 */
+       PSIL_ETHERNET(0xc200),
+       PSIL_ETHERNET(0xc201),
+       PSIL_ETHERNET(0xc202),
+       PSIL_ETHERNET(0xc203),
+       PSIL_ETHERNET(0xc204),
+       PSIL_ETHERNET(0xc205),
+       PSIL_ETHERNET(0xc206),
+       PSIL_ETHERNET(0xc207),
+       /* CPSW9 */
+       PSIL_ETHERNET(0xca00),
+       PSIL_ETHERNET(0xca01),
+       PSIL_ETHERNET(0xca02),
+       PSIL_ETHERNET(0xca03),
+       PSIL_ETHERNET(0xca04),
+       PSIL_ETHERNET(0xca05),
+       PSIL_ETHERNET(0xca06),
+       PSIL_ETHERNET(0xca07),
+       /* CPSW0 */
+       PSIL_ETHERNET(0xf000),
+       PSIL_ETHERNET(0xf001),
+       PSIL_ETHERNET(0xf002),
+       PSIL_ETHERNET(0xf003),
+       PSIL_ETHERNET(0xf004),
+       PSIL_ETHERNET(0xf005),
+       PSIL_ETHERNET(0xf006),
+       PSIL_ETHERNET(0xf007),
+       /* SA2UL */
+       PSIL_SA2UL(0xf500, 1),
+};
+
+struct psil_ep_map j721e_ep_map = {
+       .name = "j721e",
+       .src = j721e_src_ep_map,
+       .src_count = ARRAY_SIZE(j721e_src_ep_map),
+       .dst = j721e_dst_ep_map,
+       .dst_count = ARRAY_SIZE(j721e_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
new file mode 100644 (file)
index 0000000..a1f389c
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_PRIV_H_
+#define K3_PSIL_PRIV_H_
+
+#include <linux/dma/k3-psil.h>
+
+struct psil_ep {
+       u32 thread_id;
+       struct psil_endpoint_config ep_config;
+};
+
+/**
+ * struct psil_ep_map - PSI-L thread ID configuration maps
+ * @name:      Name of the map, set it to the name of the SoC
+ * @src:       Array of source PSI-L thread configurations
+ * @src_count: Number of entries in the src array
+ * @dst:       Array of destination PSI-L thread configurations
+ * @dst_count: Number of entries in the dst array
+ *
+ * In case of symmetric configuration for a matching src/dst thread (for example
+ * 0x4400 and 0xc400) only the src configuration needs to be present. If no dst
+ * configuration is found, the code will look for (dst_thread_id & ~0x8000) to
+ * find the symmetric match.
+ */
+struct psil_ep_map {
+       char *name;
+       struct psil_ep  *src;
+       int src_count;
+       struct psil_ep  *dst;
+       int dst_count;
+};
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
+
+/* SoC PSI-L endpoint maps */
+extern struct psil_ep_map am654_ep_map;
+extern struct psil_ep_map j721e_ep_map;
+
+#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
new file mode 100644 (file)
index 0000000..d7b9650
--- /dev/null
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include "k3-psil-priv.h"
+
+static DEFINE_MUTEX(ep_map_mutex);
+static struct psil_ep_map *soc_ep_map;
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
+{
+       int i;
+
+       mutex_lock(&ep_map_mutex);
+       if (!soc_ep_map) {
+               if (of_machine_is_compatible("ti,am654")) {
+                       soc_ep_map = &am654_ep_map;
+               } else if (of_machine_is_compatible("ti,j721e")) {
+                       soc_ep_map = &j721e_ep_map;
+               } else {
+                       pr_err("PSIL: No compatible machine found for map\n");
+                       return ERR_PTR(-ENOTSUPP);
+               }
+               pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
+       }
+       mutex_unlock(&ep_map_mutex);
+
+       if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) {
+               /* check in destination thread map */
+               for (i = 0; i < soc_ep_map->dst_count; i++) {
+                       if (soc_ep_map->dst[i].thread_id == thread_id)
+                               return &soc_ep_map->dst[i].ep_config;
+               }
+       }
+
+       thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;
+       if (soc_ep_map->src) {
+               for (i = 0; i < soc_ep_map->src_count; i++) {
+                       if (soc_ep_map->src[i].thread_id == thread_id)
+                               return &soc_ep_map->src[i].ep_config;
+               }
+       }
+
+       return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(psil_get_ep_config);
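/*
 * Bit 15 (the 0x8000 noted in the psil_ep_map kernel-doc) marks a
 * destination thread: a dst ID is searched in the dst table first,
 * then retried in the src table with the bit cleared, which is how
 * symmetric endpoints avoid duplicate entries. A hedged usage sketch
 * with a thread ID taken from the am654 map above:
 */
struct psil_endpoint_config *cfg;

cfg = psil_get_ep_config(0xc400);       /* no dst entry; falls back to 0x4400 */
if (!IS_ERR(cfg))
        pr_debug("ep_type %d\n", cfg->ep_type);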
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+                          struct psil_endpoint_config *ep_config)
+{
+       struct psil_endpoint_config *dst_ep_config;
+       struct of_phandle_args dma_spec;
+       u32 thread_id;
+       int index;
+
+       if (!dev || !dev->of_node)
+               return -EINVAL;
+
+       index = of_property_match_string(dev->of_node, "dma-names", name);
+       if (index < 0)
+               return index;
+
+       if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells",
+                                      index, &dma_spec))
+               return -ENOENT;
+
+       thread_id = dma_spec.args[0];
+
+       dst_ep_config = psil_get_ep_config(thread_id);
+       if (IS_ERR(dst_ep_config)) {
+               pr_err("PSIL: thread ID 0x%04x not defined in map\n",
+                      thread_id);
+               of_node_put(dma_spec.np);
+               return PTR_ERR(dst_ep_config);
+       }
+
+       memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config));
+
+       of_node_put(dma_spec.np);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
new file mode 100644 (file)
index 0000000..c151129
--- /dev/null
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 NAVSS DMA glue interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct k3_udma_glue_common {
+       struct device *dev;
+       struct udma_dev *udmax;
+       const struct udma_tisci_rm *tisci_rm;
+       struct k3_ringacc *ringacc;
+       u32 src_thread;
+       u32 dst_thread;
+
+       u32  hdesc_size;
+       bool epib;
+       u32  psdata_size;
+       u32  swdata_size;
+};
+
+struct k3_udma_glue_tx_channel {
+       struct k3_udma_glue_common common;
+
+       struct udma_tchan *udma_tchanx;
+       int udma_tchan_id;
+
+       struct k3_ring *ringtx;
+       struct k3_ring *ringtxcq;
+
+       bool psil_paired;
+
+       int virq;
+
+       atomic_t free_pkts;
+       bool tx_pause_on_err;
+       bool tx_filt_einfo;
+       bool tx_filt_pswords;
+       bool tx_supr_tdpkt;
+};
+
+struct k3_udma_glue_rx_flow {
+       struct udma_rflow *udma_rflow;
+       int udma_rflow_id;
+       struct k3_ring *ringrx;
+       struct k3_ring *ringrxfdq;
+
+       int virq;
+};
+
+struct k3_udma_glue_rx_channel {
+       struct k3_udma_glue_common common;
+
+       struct udma_rchan *udma_rchanx;
+       int udma_rchan_id;
+       bool remote;
+
+       bool psil_paired;
+
+       u32  swdata_size;
+       int  flow_id_base;
+
+       struct k3_udma_glue_rx_flow *flows;
+       u32 flow_num;
+       u32 flows_ready;
+};
+
+#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
+
+static int of_k3_udma_glue_parse(struct device_node *udmax_np,
+                                struct k3_udma_glue_common *common)
+{
+       common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
+                                                      "ti,ringacc");
+       if (IS_ERR(common->ringacc))
+               return PTR_ERR(common->ringacc);
+
+       common->udmax = of_xudma_dev_get(udmax_np, NULL);
+       if (IS_ERR(common->udmax))
+               return PTR_ERR(common->udmax);
+
+       common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
+
+       return 0;
+}
+
+static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
+               const char *name, struct k3_udma_glue_common *common,
+               bool tx_chn)
+{
+       struct psil_endpoint_config *ep_config;
+       struct of_phandle_args dma_spec;
+       u32 thread_id;
+       int ret = 0;
+       int index;
+
+       if (unlikely(!name))
+               return -EINVAL;
+
+       index = of_property_match_string(chn_np, "dma-names", name);
+       if (index < 0)
+               return index;
+
+       if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
+                                      &dma_spec))
+               return -ENOENT;
+
+       thread_id = dma_spec.args[0];
+
+       if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+               ret = -EINVAL;
+               goto out_put_spec;
+       }
+
+       if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+               ret = -EINVAL;
+               goto out_put_spec;
+       }
+
+       /* get psil endpoint config */
+       ep_config = psil_get_ep_config(thread_id);
+       if (IS_ERR(ep_config)) {
+               dev_err(common->dev,
+                       "No configuration for psi-l thread 0x%04x\n",
+                       thread_id);
+               ret = PTR_ERR(ep_config);
+               goto out_put_spec;
+       }
+
+       common->epib = ep_config->needs_epib;
+       common->psdata_size = ep_config->psd_size;
+
+       if (tx_chn)
+               common->dst_thread = thread_id;
+       else
+               common->src_thread = thread_id;
+
+       ret = of_k3_udma_glue_parse(dma_spec.np, common);
+
+out_put_spec:
+       of_node_put(dma_spec.np);
+       return ret;
+}
+
+static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       struct device *dev = tx_chn->common.dev;
+
+       dev_dbg(dev, "dump_tx_chn:\n"
+               "udma_tchan_id: %d\n"
+               "src_thread: %08x\n"
+               "dst_thread: %08x\n",
+               tx_chn->udma_tchan_id,
+               tx_chn->common.src_thread,
+               tx_chn->common.dst_thread);
+}
+
+static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
+                                       char *mark)
+{
+       struct device *dev = chn->common.dev;
+
+       dev_dbg(dev, "=== dump ===> %s\n", mark);
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG,
+               xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG,
+               xudma_tchanrt_read(chn->udma_tchanx,
+                                  UDMA_TCHAN_RT_PEER_RT_EN_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG,
+               xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG,
+               xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG,
+               xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG));
+}
+
+static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
+       struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
+
+       memset(&req, 0, sizeof(req));
+
+       req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+       req.nav_id = tisci_rm->tisci_dev_id;
+       req.index = tx_chn->udma_tchan_id;
+       if (tx_chn->tx_pause_on_err)
+               req.tx_pause_on_err = 1;
+       if (tx_chn->tx_filt_einfo)
+               req.tx_filt_einfo = 1;
+       if (tx_chn->tx_filt_pswords)
+               req.tx_filt_pswords = 1;
+       req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+       if (tx_chn->tx_supr_tdpkt)
+               req.tx_supr_tdpkt = 1;
+       req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
+       req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+
+       return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
+}
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+               const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+       struct k3_udma_glue_tx_channel *tx_chn;
+       int ret;
+
+       tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+       if (!tx_chn)
+               return ERR_PTR(-ENOMEM);
+
+       tx_chn->common.dev = dev;
+       tx_chn->common.swdata_size = cfg->swdata_size;
+       tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+       tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+       tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+       tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+       /* parse the udmap channel from the OF node */
+       ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+                                       &tx_chn->common, true);
+       if (ret)
+               goto err;
+
+       tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
+                                               tx_chn->common.psdata_size,
+                                               tx_chn->common.swdata_size);
+
+       /* request and cfg UDMAP TX channel */
+       tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
+       if (IS_ERR(tx_chn->udma_tchanx)) {
+               ret = PTR_ERR(tx_chn->udma_tchanx);
+               dev_err(dev, "UDMAX tchanx get err %d\n", ret);
+               goto err;
+       }
+       tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
+
+       atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
+
+       /* request and cfg rings */
+       tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc,
+                                                tx_chn->udma_tchan_id, 0);
+       if (!tx_chn->ringtx) {
+               ret = -ENODEV;
+               dev_err(dev, "Failed to get TX ring %u\n",
+                       tx_chn->udma_tchan_id);
+               goto err;
+       }
+
+       tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc,
+                                                  -1, 0);
+       if (!tx_chn->ringtxcq) {
+               ret = -ENODEV;
+               dev_err(dev, "Failed to get TXCQ ring\n");
+               goto err;
+       }
+
+       ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
+       if (ret) {
+               dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+               goto err;
+       }
+
+       ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
+       if (ret) {
+               dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+               goto err;
+       }
+
+       /* request and cfg psi-l */
+       tx_chn->common.src_thread =
+                       xudma_dev_get_psil_base(tx_chn->common.udmax) +
+                       tx_chn->udma_tchan_id;
+
+       ret = k3_udma_glue_cfg_tx_chn(tx_chn);
+       if (ret) {
+               dev_err(dev, "Failed to cfg tchan %d\n", ret);
+               goto err;
+       }
+
+       ret = xudma_navss_psil_pair(tx_chn->common.udmax,
+                                   tx_chn->common.src_thread,
+                                   tx_chn->common.dst_thread);
+       if (ret) {
+               dev_err(dev, "PSI-L request err %d\n", ret);
+               goto err;
+       }
+
+       tx_chn->psil_paired = true;
+
+       /* reset TX RT registers */
+       k3_udma_glue_disable_tx_chn(tx_chn);
+
+       k3_udma_glue_dump_tx_chn(tx_chn);
+
+       return tx_chn;
+
+err:
+       k3_udma_glue_release_tx_chn(tx_chn);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
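+
+/*
+ * Illustrative usage sketch, not part of this driver: a client (e.g. a
+ * networking driver) would typically request the channel from its probe.
+ * The channel name "tx0-ch" and the ring sizes below are hypothetical.
+ *
+ *     struct k3_udma_glue_tx_channel_cfg cfg = { };
+ *     struct k3_udma_glue_tx_channel *tx_chn;
+ *
+ *     cfg.swdata_size = sizeof(void *);
+ *     cfg.tx_cfg.size = 128;
+ *     cfg.txcq_cfg.size = 128;
+ *     tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0-ch", &cfg);
+ *     if (IS_ERR(tx_chn))
+ *             return PTR_ERR(tx_chn);
+ */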
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       if (tx_chn->psil_paired) {
+               xudma_navss_psil_unpair(tx_chn->common.udmax,
+                                       tx_chn->common.src_thread,
+                                       tx_chn->common.dst_thread);
+               tx_chn->psil_paired = false;
+       }
+
+       if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
+               xudma_tchan_put(tx_chn->common.udmax,
+                               tx_chn->udma_tchanx);
+
+       if (tx_chn->ringtxcq)
+               k3_ringacc_ring_free(tx_chn->ringtxcq);
+
+       if (tx_chn->ringtx)
+               k3_ringacc_ring_free(tx_chn->ringtx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
+
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                            struct cppi5_host_desc_t *desc_tx,
+                            dma_addr_t desc_dma)
+{
+       u32 ringtxcq_id;
+
+       if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
+               return -ENOMEM;
+
+       ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+       cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);
+
+       return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);
+
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                           dma_addr_t *desc_dma)
+{
+       int ret;
+
+       ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
+       if (!ret)
+               atomic_inc(&tx_chn->free_pkts);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
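+
+/*
+ * Illustrative sketch of the descriptor flow (my_tx_complete() is a
+ * hypothetical client helper): push a prepared host descriptor, then pop
+ * completed descriptors back by their DMA address, typically from the
+ * TXCQ ring interrupt:
+ *
+ *     ret = k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
+ *     ...
+ *     while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
+ *             my_tx_complete(priv, desc_dma);
+ */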
+
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       u32 txrt_ctl;
+
+       txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
+       xudma_tchanrt_write(tx_chn->udma_tchanx,
+                           UDMA_TCHAN_RT_PEER_RT_EN_REG,
+                           txrt_ctl);
+
+       txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
+                                     UDMA_TCHAN_RT_CTL_REG);
+       txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+       xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+                           txrt_ctl);
+
+       k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
+       return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);
+
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");
+
+       xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0);
+
+       xudma_tchanrt_write(tx_chn->udma_tchanx,
+                           UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+       k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
+
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                              bool sync)
+{
+       int i = 0;
+       u32 val;
+
+       k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");
+
+       xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+                           UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);
+
+       val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG);
+
+       while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+               val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+                                        UDMA_TCHAN_RT_CTL_REG);
+               udelay(1);
+               if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+                       dev_err(tx_chn->common.dev, "TX tdown timeout\n");
+                       break;
+               }
+               i++;
+       }
+
+       val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+                                UDMA_TCHAN_RT_PEER_RT_EN_REG);
+       if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+               dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
+       k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
+
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                              void *data,
+                              void (*cleanup)(void *data, dma_addr_t desc_dma))
+{
+       dma_addr_t desc_dma;
+       int occ_tx, i, ret;
+
+       /* reset TXCQ as it is not an input ring for udma - expected to be empty */
+       if (tx_chn->ringtxcq)
+               k3_ringacc_ring_reset(tx_chn->ringtxcq);
+
+       /*
+        * The TXQ reset needs to be done in a special way as it is an input
+        * ring for udma and its state is cached by udma, so:
+        * 1) save the TXQ occupancy
+        * 2) clean up the TXQ and call the .cleanup() callback for each desc
+        * 3) reset the TXQ in a special way
+        */
+       occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
+       dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);
+
+       for (i = 0; i < occ_tx; i++) {
+               ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
+               if (ret) {
+                       dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
+                       break;
+               }
+               cleanup(data, desc_dma);
+       }
+
+       k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
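+
+/*
+ * Illustrative sketch of a .cleanup() callback (all names hypothetical):
+ * the callback receives the DMA address of each descriptor still queued
+ * in the TX ring and is expected to release the client's own resources
+ * tied to it:
+ *
+ *     static void my_tx_cleanup(void *data, dma_addr_t desc_dma)
+ *     {
+ *             struct my_priv *priv = data;
+ *
+ *             my_desc_unmap_and_free(priv, desc_dma);
+ *     }
+ *
+ *     k3_udma_glue_reset_tx_chn(tx_chn, priv, my_tx_cleanup);
+ */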
+
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       return tx_chn->common.hdesc_size;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);
+
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
+
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+
+       return tx_chn->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
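+
+/*
+ * Illustrative sketch (handler name hypothetical): the returned virq is
+ * a regular Linux interrupt number, usable with request_irq():
+ *
+ *     irq = k3_udma_glue_tx_get_irq(tx_chn);
+ *     if (irq <= 0)
+ *             return -ENXIO;
+ *     ret = request_irq(irq, my_tx_irq_handler, 0, "my-tx", priv);
+ */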
+
+static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+       const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+       struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
+       int ret;
+
+       memset(&req, 0, sizeof(req));
+
+       req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+                          TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
+                          TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+                          TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+                          TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+
+       req.nav_id = tisci_rm->tisci_dev_id;
+       req.index = rx_chn->udma_rchan_id;
+       req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
+       /*
+        * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with the current
+        * sysfw and udmax implementation, so just configure it to an invalid
+        * value:
+        * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
+        */
+       req.rxcq_qnum = 0xFFFF;
+       if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
+               /* Default flow + extra ones */
+               req.flowid_start = rx_chn->flow_id_base;
+               req.flowid_cnt = rx_chn->flow_num;
+       }
+       req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+
+       ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
+       if (ret)
+               dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
+                       rx_chn->udma_rchan_id, ret);
+
+       return ret;
+}
+
+static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+                                        u32 flow_num)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+       if (IS_ERR_OR_NULL(flow->udma_rflow))
+               return;
+
+       if (flow->ringrxfdq)
+               k3_ringacc_ring_free(flow->ringrxfdq);
+
+       if (flow->ringrx)
+               k3_ringacc_ring_free(flow->ringrx);
+
+       xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+       flow->udma_rflow = NULL;
+       rx_chn->flows_ready--;
+}
+
+static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+                                   u32 flow_idx,
+                                   struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+       const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+       struct device *dev = rx_chn->common.dev;
+       struct ti_sci_msg_rm_udmap_flow_cfg req;
+       int rx_ring_id;
+       int rx_ringfdq_id;
+       int ret = 0;
+
+       flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
+                                          flow->udma_rflow_id);
+       if (IS_ERR(flow->udma_rflow)) {
+               ret = PTR_ERR(flow->udma_rflow);
+               dev_err(dev, "UDMAX rflow get err %d\n", ret);
+               goto err;
+       }
+
+       if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
+               xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+               return -ENODEV;
+       }
+
+       /* request and cfg rings */
+       flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc,
+                                              flow_cfg->ring_rxq_id, 0);
+       if (!flow->ringrx) {
+               ret = -ENODEV;
+               dev_err(dev, "Failed to get RX ring\n");
+               goto err;
+       }
+
+       flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
+                                                 flow_cfg->ring_rxfdq0_id, 0);
+       if (!flow->ringrxfdq) {
+               ret = -ENODEV;
+               dev_err(dev, "Failed to get RXFDQ ring\n");
+               goto err;
+       }
+
+       ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
+       if (ret) {
+               dev_err(dev, "Failed to cfg ringrx %d\n", ret);
+               goto err;
+       }
+
+       ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
+       if (ret) {
+               dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
+               goto err;
+       }
+
+       if (rx_chn->remote) {
+               rx_ring_id = TI_SCI_RESOURCE_NULL;
+               rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
+       } else {
+               rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+               rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+       }
+
+       memset(&req, 0, sizeof(req));
+
+       req.valid_params =
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+       req.nav_id = tisci_rm->tisci_dev_id;
+       req.flow_index = flow->udma_rflow_id;
+       if (rx_chn->common.epib)
+               req.rx_einfo_present = 1;
+       if (rx_chn->common.psdata_size)
+               req.rx_psinfo_present = 1;
+       if (flow_cfg->rx_error_handling)
+               req.rx_error_handling = 1;
+       req.rx_desc_type = 0;
+       req.rx_dest_qnum = rx_ring_id;
+       req.rx_src_tag_hi_sel = 0;
+       req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
+       req.rx_dest_tag_hi_sel = 0;
+       req.rx_dest_tag_lo_sel = 0;
+       req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+       req.rx_fdq1_qnum = rx_ringfdq_id;
+       req.rx_fdq2_qnum = rx_ringfdq_id;
+       req.rx_fdq3_qnum = rx_ringfdq_id;
+
+       ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+       if (ret) {
+               dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
+                       ret);
+               goto err;
+       }
+
+       rx_chn->flows_ready++;
+       dev_dbg(dev, "flow%d config done. ready:%d\n",
+               flow->udma_rflow_id, rx_chn->flows_ready);
+
+       return 0;
+err:
+       k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
+       return ret;
+}
+
+static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
+{
+       struct device *dev = chn->common.dev;
+
+       dev_dbg(dev, "dump_rx_chn:\n"
+               "udma_rchan_id: %d\n"
+               "src_thread: %08x\n"
+               "dst_thread: %08x\n"
+               "epib: %d\n"
+               "hdesc_size: %u\n"
+               "psdata_size: %u\n"
+               "swdata_size: %u\n"
+               "flow_id_base: %d\n"
+               "flow_num: %d\n",
+               chn->udma_rchan_id,
+               chn->common.src_thread,
+               chn->common.dst_thread,
+               chn->common.epib,
+               chn->common.hdesc_size,
+               chn->common.psdata_size,
+               chn->common.swdata_size,
+               chn->flow_id_base,
+               chn->flow_num);
+}
+
+static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
+                                       char *mark)
+{
+       struct device *dev = chn->common.dev;
+
+       dev_dbg(dev, "=== dump ===> %s\n", mark);
+
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG,
+               xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG,
+               xudma_rchanrt_read(chn->udma_rchanx,
+                                  UDMA_RCHAN_RT_PEER_RT_EN_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG,
+               xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG,
+               xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG,
+               xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG));
+}
+
+static int
+k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
+                              struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+       int ret;
+
+       /* default rflow */
+       if (cfg->flow_id_use_rxchan_id)
+               return 0;
+
+       /* not GP rflows */
+       if (rx_chn->flow_id_base != -1 &&
+           !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+               return 0;
+
+       /* Allocate range of GP rflows */
+       ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
+                                        rx_chn->flow_id_base,
+                                        rx_chn->flow_num);
+       if (ret < 0) {
+               dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
+                       rx_chn->flow_id_base, rx_chn->flow_num, ret);
+               return ret;
+       }
+       rx_chn->flow_id_base = ret;
+
+       return 0;
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
+                                struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+       struct k3_udma_glue_rx_channel *rx_chn;
+       int ret, i;
+
+       if (cfg->flow_id_num <= 0)
+               return ERR_PTR(-EINVAL);
+
+       if (cfg->flow_id_num != 1 &&
+           (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
+               return ERR_PTR(-EINVAL);
+
+       rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+       if (!rx_chn)
+               return ERR_PTR(-ENOMEM);
+
+       rx_chn->common.dev = dev;
+       rx_chn->common.swdata_size = cfg->swdata_size;
+       rx_chn->remote = false;
+
+       /* parse the udmap channel from the OF node */
+       ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+                                       &rx_chn->common, false);
+       if (ret)
+               goto err;
+
+       rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+                                               rx_chn->common.psdata_size,
+                                               rx_chn->common.swdata_size);
+
+       /* request and cfg UDMAP RX channel */
+       rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
+       if (IS_ERR(rx_chn->udma_rchanx)) {
+               ret = PTR_ERR(rx_chn->udma_rchanx);
+               dev_err(dev, "UDMAX rchanx get err %d\n", ret);
+               goto err;
+       }
+       rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
+
+       rx_chn->flow_num = cfg->flow_id_num;
+       rx_chn->flow_id_base = cfg->flow_id_base;
+
+       /* Use RX channel id as flow id: target dev can't generate flow_id */
+       if (cfg->flow_id_use_rxchan_id)
+               rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+
+       rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+                                    sizeof(*rx_chn->flows), GFP_KERNEL);
+       if (!rx_chn->flows) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+       if (ret)
+               goto err;
+
+       for (i = 0; i < rx_chn->flow_num; i++)
+               rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+       /* request and cfg psi-l */
+       rx_chn->common.dst_thread =
+                       xudma_dev_get_psil_base(rx_chn->common.udmax) +
+                       rx_chn->udma_rchan_id;
+
+       ret = k3_udma_glue_cfg_rx_chn(rx_chn);
+       if (ret) {
+               dev_err(dev, "Failed to cfg rchan %d\n", ret);
+               goto err;
+       }
+
+       /* init the default RX flow only if flow_num == 1 */
+       if (cfg->def_flow_cfg) {
+               ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
+               if (ret)
+                       goto err;
+       }
+
+       ret = xudma_navss_psil_pair(rx_chn->common.udmax,
+                                   rx_chn->common.src_thread,
+                                   rx_chn->common.dst_thread);
+       if (ret) {
+               dev_err(dev, "PSI-L request err %d\n", ret);
+               goto err;
+       }
+
+       rx_chn->psil_paired = true;
+
+       /* reset RX RT registers */
+       k3_udma_glue_disable_rx_chn(rx_chn);
+
+       k3_udma_glue_dump_rx_chn(rx_chn);
+
+       return rx_chn;
+
+err:
+       k3_udma_glue_release_rx_chn(rx_chn);
+       return ERR_PTR(ret);
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
+                                  struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+       struct k3_udma_glue_rx_channel *rx_chn;
+       int ret, i;
+
+       if (cfg->flow_id_num <= 0 ||
+           cfg->flow_id_use_rxchan_id ||
+           cfg->def_flow_cfg ||
+           cfg->flow_id_base < 0)
+               return ERR_PTR(-EINVAL);
+
+       /*
+        * The remote RX channel is under the control of a remote CPU core,
+        * so Linux can only request it and manipulate it through the
+        * dedicated RX flows
+        */
+
+       rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+       if (!rx_chn)
+               return ERR_PTR(-ENOMEM);
+
+       rx_chn->common.dev = dev;
+       rx_chn->common.swdata_size = cfg->swdata_size;
+       rx_chn->remote = true;
+       rx_chn->udma_rchan_id = -1;
+       rx_chn->flow_num = cfg->flow_id_num;
+       rx_chn->flow_id_base = cfg->flow_id_base;
+       rx_chn->psil_paired = false;
+
+       /* parse the udmap channel from the OF node */
+       ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+                                       &rx_chn->common, false);
+       if (ret)
+               goto err;
+
+       rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+                                               rx_chn->common.psdata_size,
+                                               rx_chn->common.swdata_size);
+
+       rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+                                    sizeof(*rx_chn->flows), GFP_KERNEL);
+       if (!rx_chn->flows) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+       if (ret)
+               goto err;
+
+       for (i = 0; i < rx_chn->flow_num; i++)
+               rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+       k3_udma_glue_dump_rx_chn(rx_chn);
+
+       return rx_chn;
+
+err:
+       k3_udma_glue_release_rx_chn(rx_chn);
+       return ERR_PTR(ret);
+}
+
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
+                           struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+       if (cfg->remote)
+               return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
+       else
+               return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
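+
+/*
+ * Illustrative usage sketch (channel name and sizes are hypothetical): a
+ * client using a single, default RX flow would request the channel
+ * roughly like this; -1 for the ring ids and flow_id_base asks for any
+ * free resource to be allocated:
+ *
+ *     struct k3_udma_glue_rx_channel_cfg cfg = { };
+ *     struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
+ *
+ *     flow_cfg.rx_cfg.size = 128;
+ *     flow_cfg.rxfdq_cfg.size = 128;
+ *     flow_cfg.ring_rxq_id = -1;
+ *     flow_cfg.ring_rxfdq0_id = -1;
+ *     cfg.swdata_size = sizeof(void *);
+ *     cfg.flow_id_num = 1;
+ *     cfg.flow_id_base = -1;
+ *     cfg.def_flow_cfg = &flow_cfg;
+ *     rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0-ch", &cfg);
+ *     if (IS_ERR(rx_chn))
+ *             return PTR_ERR(rx_chn);
+ */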
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+       int i;
+
+       if (IS_ERR_OR_NULL(rx_chn->common.udmax))
+               return;
+
+       if (rx_chn->psil_paired) {
+               xudma_navss_psil_unpair(rx_chn->common.udmax,
+                                       rx_chn->common.src_thread,
+                                       rx_chn->common.dst_thread);
+               rx_chn->psil_paired = false;
+       }
+
+       for (i = 0; i < rx_chn->flow_num; i++)
+               k3_udma_glue_release_rx_flow(rx_chn, i);
+
+       if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+               xudma_free_gp_rflow_range(rx_chn->common.udmax,
+                                         rx_chn->flow_id_base,
+                                         rx_chn->flow_num);
+
+       if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
+               xudma_rchan_put(rx_chn->common.udmax,
+                               rx_chn->udma_rchanx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
+
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+                             u32 flow_idx,
+                             struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+       if (flow_idx >= rx_chn->flow_num)
+               return -EINVAL;
+
+       return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
+
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+                                   u32 flow_idx)
+{
+       struct k3_udma_glue_rx_flow *flow;
+
+       if (flow_idx >= rx_chn->flow_num)
+               return -EINVAL;
+
+       flow = &rx_chn->flows[flow_idx];
+
+       return k3_ringacc_get_ring_id(flow->ringrxfdq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);
+
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
+{
+       return rx_chn->flow_id_base;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
+
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+                               u32 flow_idx)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+       const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+       struct device *dev = rx_chn->common.dev;
+       struct ti_sci_msg_rm_udmap_flow_cfg req;
+       int rx_ring_id;
+       int rx_ringfdq_id;
+       int ret = 0;
+
+       if (!rx_chn->remote)
+               return -EINVAL;
+
+       rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+       rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+
+       memset(&req, 0, sizeof(req));
+
+       req.valid_params =
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+       req.nav_id = tisci_rm->tisci_dev_id;
+       req.flow_index = flow->udma_rflow_id;
+       req.rx_dest_qnum = rx_ring_id;
+       req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+       req.rx_fdq1_qnum = rx_ringfdq_id;
+       req.rx_fdq2_qnum = rx_ringfdq_id;
+       req.rx_fdq3_qnum = rx_ringfdq_id;
+
+       ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+       if (ret) {
+               dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
+                       ret);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
+
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+                                u32 flow_idx)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+       const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+       struct device *dev = rx_chn->common.dev;
+       struct ti_sci_msg_rm_udmap_flow_cfg req;
+       int ret = 0;
+
+       if (!rx_chn->remote)
+               return -EINVAL;
+
+       memset(&req, 0, sizeof(req));
+       req.valid_params =
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+       req.nav_id = tisci_rm->tisci_dev_id;
+       req.flow_index = flow->udma_rflow_id;
+       req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
+       req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
+       req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
+       req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
+       req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
+
+       ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+       if (ret) {
+               dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
+                       ret);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
+
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+       u32 rxrt_ctl;
+
+       if (rx_chn->remote)
+               return -EINVAL;
+
+       if (rx_chn->flows_ready < rx_chn->flow_num)
+               return -EINVAL;
+
+       rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
+                                     UDMA_RCHAN_RT_CTL_REG);
+       rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+       xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG,
+                           rxrt_ctl);
+
+       xudma_rchanrt_write(rx_chn->udma_rchanx,
+                           UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                           UDMA_PEER_RT_EN_ENABLE);
+
+       k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
+       return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
+
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+       k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
+
+       xudma_rchanrt_write(rx_chn->udma_rchanx,
+                           UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                           0);
+       xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0);
+
+       k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
+
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+                              bool sync)
+{
+       int i = 0;
+       u32 val;
+
+       if (rx_chn->remote)
+               return;
+
+       k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
+
+       xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                           UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
+
+       val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG);
+
+       while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+               val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+                                        UDMA_RCHAN_RT_CTL_REG);
+               udelay(1);
+               if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+                       dev_err(rx_chn->common.dev, "RX tdown timeout\n");
+                       break;
+               }
+               i++;
+       }
+
+       val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+                                UDMA_RCHAN_RT_PEER_RT_EN_REG);
+       if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+               dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n");
+       k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
+
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+               u32 flow_num, void *data,
+               void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+       struct device *dev = rx_chn->common.dev;
+       dma_addr_t desc_dma;
+       int occ_rx, i, ret;
+
+       /* reset RXCQ as it is not an input ring for udma - expected to be empty */
+       occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
+       dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
+       if (flow->ringrx)
+               k3_ringacc_ring_reset(flow->ringrx);
+
+       /* Skip RX FDQ in case one FDQ is used for the set of flows */
+       if (skip_fdq)
+               return;
+
+       /*
+        * The RX FDQ reset needs to be done in a special way as it is an
+        * input ring for udma and its state is cached by udma, so:
+        * 1) save the RX FDQ occupancy
+        * 2) clean up the RX FDQ and call the .cleanup() callback for each
+        *    desc
+        * 3) reset the RX FDQ in a special way
+        */
+       occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
+       dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
+
+       for (i = 0; i < occ_rx; i++) {
+               ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
+               if (ret) {
+                       dev_err(dev, "RX reset pop %d\n", ret);
+                       break;
+               }
+               cleanup(data, desc_dma);
+       }
+
+       k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
+
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+                            u32 flow_num, struct cppi5_host_desc_t *desc_rx,
+                            dma_addr_t desc_dma)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+       return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
+
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+                           u32 flow_num, dma_addr_t *desc_dma)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+       return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
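+
+/*
+ * Illustrative sketch (my_rx_packet() is a hypothetical client helper):
+ * keep the free descriptor queue of flow 0 populated via push, and pop
+ * completed descriptors from the RX ring in the interrupt handler:
+ *
+ *     ret = k3_udma_glue_push_rx_chn(rx_chn, 0, desc_rx, desc_dma);
+ *     ...
+ *     while (!k3_udma_glue_pop_rx_chn(rx_chn, 0, &desc_dma))
+ *             my_rx_packet(priv, desc_dma);
+ */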
+
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+                           u32 flow_num)
+{
+       struct k3_udma_glue_rx_flow *flow;
+
+       flow = &rx_chn->flows[flow_num];
+
+       flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+
+       return flow->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
new file mode 100644 (file)
index 0000000..0b8f3dd
--- /dev/null
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+       return navss_psil_pair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_pair);
+
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+       return navss_psil_unpair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_unpair);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
+{
+       struct device_node *udma_node = np;
+       struct platform_device *pdev;
+       struct udma_dev *ud;
+
+       if (property) {
+               udma_node = of_parse_phandle(np, property, 0);
+               if (!udma_node) {
+                       pr_err("UDMA node is not found\n");
+                       return ERR_PTR(-ENODEV);
+               }
+       }
+
+       pdev = of_find_device_by_node(udma_node);
+       if (!pdev) {
+               pr_debug("UDMA device not found\n");
+               return ERR_PTR(-EPROBE_DEFER);
+       }
+
+       if (np != udma_node)
+               of_node_put(udma_node);
+
+       ud = platform_get_drvdata(pdev);
+       if (!ud) {
+               pr_debug("UDMA has not been probed\n");
+               return ERR_PTR(-EPROBE_DEFER);
+       }
+
+       return ud;
+}
+EXPORT_SYMBOL(of_xudma_dev_get);
+
+u32 xudma_dev_get_psil_base(struct udma_dev *ud)
+{
+       return ud->psil_base;
+}
+EXPORT_SYMBOL(xudma_dev_get_psil_base);
+
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud)
+{
+       return &ud->tisci_rm;
+}
+EXPORT_SYMBOL(xudma_dev_get_tisci_rm);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+       return __udma_alloc_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_alloc_gp_rflow_range);
+
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+       return __udma_free_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_free_gp_rflow_range);
+
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
+{
+       return !test_bit(id, ud->rflow_gp_map);
+}
+EXPORT_SYMBOL(xudma_rflow_is_gp);
+
+#define XUDMA_GET_PUT_RESOURCE(res)                                    \
+struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id)      \
+{                                                                      \
+       return __udma_reserve_##res(ud, false, id);                     \
+}                                                                      \
+EXPORT_SYMBOL(xudma_##res##_get);                                      \
+                                                                       \
+void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p)      \
+{                                                                      \
+       clear_bit(p->id, ud->res##_map);                                \
+}                                                                      \
+EXPORT_SYMBOL(xudma_##res##_put)
+XUDMA_GET_PUT_RESOURCE(tchan);
+XUDMA_GET_PUT_RESOURCE(rchan);
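+
+/*
+ * For reference, XUDMA_GET_PUT_RESOURCE(tchan) expands to:
+ *
+ *     struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id)
+ *     {
+ *             return __udma_reserve_tchan(ud, false, id);
+ *     }
+ *     EXPORT_SYMBOL(xudma_tchan_get);
+ *
+ *     void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p)
+ *     {
+ *             clear_bit(p->id, ud->tchan_map);
+ *     }
+ *     EXPORT_SYMBOL(xudma_tchan_put);
+ */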
+
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id)
+{
+       return __udma_get_rflow(ud, id);
+}
+EXPORT_SYMBOL(xudma_rflow_get);
+
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
+{
+       __udma_put_rflow(ud, p);
+}
+EXPORT_SYMBOL(xudma_rflow_put);
+
+#define XUDMA_GET_RESOURCE_ID(res)                                     \
+int xudma_##res##_get_id(struct udma_##res *p)                         \
+{                                                                      \
+       return p->id;                                                   \
+}                                                                      \
+EXPORT_SYMBOL(xudma_##res##_get_id)
+XUDMA_GET_RESOURCE_ID(tchan);
+XUDMA_GET_RESOURCE_ID(rchan);
+XUDMA_GET_RESOURCE_ID(rflow);
+
+/* Exported register access functions */
+#define XUDMA_RT_IO_FUNCTIONS(res)                                     \
+u32 xudma_##res##rt_read(struct udma_##res *p, int reg)                        \
+{                                                                      \
+       return udma_##res##rt_read(p, reg);                             \
+}                                                                      \
+EXPORT_SYMBOL(xudma_##res##rt_read);                                   \
+                                                                       \
+void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val)     \
+{                                                                      \
+       udma_##res##rt_write(p, reg, val);                              \
+}                                                                      \
+EXPORT_SYMBOL(xudma_##res##rt_write)
+XUDMA_RT_IO_FUNCTIONS(tchan);
+XUDMA_RT_IO_FUNCTIONS(rchan);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
new file mode 100644 (file)
index 0000000..ea79c2d
--- /dev/null
@@ -0,0 +1,3432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/dma/ti-cppi5.h>
+
+#include "../virt-dma.h"
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct udma_static_tr {
+       u8 elsize; /* RPSTR0 */
+       u16 elcnt; /* RPSTR0 */
+       u16 bstcnt; /* RPSTR1 */
+};
+
+#define K3_UDMA_MAX_RFLOWS             1024
+#define K3_UDMA_DEFAULT_RING_SIZE      16
+
+/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
+#define UDMA_RFLOW_SRCTAG_NONE         0
+#define UDMA_RFLOW_SRCTAG_CFG_TAG      1
+#define UDMA_RFLOW_SRCTAG_FLOW_ID      2
+#define UDMA_RFLOW_SRCTAG_SRC_TAG      4
+
+#define UDMA_RFLOW_DSTTAG_NONE         0
+#define UDMA_RFLOW_DSTTAG_CFG_TAG      1
+#define UDMA_RFLOW_DSTTAG_FLOW_ID      2
+#define UDMA_RFLOW_DSTTAG_DST_TAG_LO   4
+#define UDMA_RFLOW_DSTTAG_DST_TAG_HI   5
+
+struct udma_chan;
+
+enum udma_mmr {
+       MMR_GCFG = 0,
+       MMR_RCHANRT,
+       MMR_TCHANRT,
+       MMR_LAST,
+};
+
+static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
+
+struct udma_tchan {
+       void __iomem *reg_rt;
+
+       int id;
+       struct k3_ring *t_ring; /* Transmit ring */
+       struct k3_ring *tc_ring; /* Transmit Completion ring */
+};
+
+struct udma_rflow {
+       int id;
+       struct k3_ring *fd_ring; /* Free Descriptor ring */
+       struct k3_ring *r_ring; /* Receive ring */
+};
+
+struct udma_rchan {
+       void __iomem *reg_rt;
+
+       int id;
+};
+
+#define UDMA_FLAG_PDMA_ACC32           BIT(0)
+#define UDMA_FLAG_PDMA_BURST           BIT(1)
+
+struct udma_match_data {
+       u32 psil_base;
+       bool enable_memcpy_support;
+       u32 flags;
+       u32 statictr_z_mask;
+       u32 rchan_oes_offset;
+
+       u8 tpl_levels;
+       u32 level_start_idx[];
+};
+
+struct udma_dev {
+       struct dma_device ddev;
+       struct device *dev;
+       void __iomem *mmrs[MMR_LAST];
+       const struct udma_match_data *match_data;
+
+       size_t desc_align; /* alignment to use for descriptors */
+
+       struct udma_tisci_rm tisci_rm;
+
+       struct k3_ringacc *ringacc;
+
+       struct work_struct purge_work;
+       struct list_head desc_to_purge;
+       spinlock_t lock;
+
+       int tchan_cnt;
+       int echan_cnt;
+       int rchan_cnt;
+       int rflow_cnt;
+       unsigned long *tchan_map;
+       unsigned long *rchan_map;
+       unsigned long *rflow_gp_map;
+       unsigned long *rflow_gp_map_allocated;
+       unsigned long *rflow_in_use;
+
+       struct udma_tchan *tchans;
+       struct udma_rchan *rchans;
+       struct udma_rflow *rflows;
+
+       struct udma_chan *channels;
+       u32 psil_base;
+};
+
+struct udma_hwdesc {
+       size_t cppi5_desc_size;
+       void *cppi5_desc_vaddr;
+       dma_addr_t cppi5_desc_paddr;
+
+       /* TR descriptor internal pointers */
+       void *tr_req_base;
+       struct cppi5_tr_resp_t *tr_resp_base;
+};
+
+struct udma_desc {
+       struct virt_dma_desc vd;
+
+       bool terminated;
+
+       enum dma_transfer_direction dir;
+
+       struct udma_static_tr static_tr;
+       u32 residue;
+
+       unsigned int sglen;
+       unsigned int desc_idx; /* Only used for cyclic in packet mode */
+       unsigned int tr_idx;
+
+       u32 metadata_size;
+       void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
+
+       unsigned int hwdesc_count;
+       struct udma_hwdesc hwdesc[0];
+};
+
+enum udma_chan_state {
+       UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
+       UDMA_CHAN_IS_ACTIVE, /* Normal operation */
+       UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
+};
+
+struct udma_tx_drain {
+       struct delayed_work work;
+       unsigned long jiffie;
+       u32 residue;
+};
+
+struct udma_chan_config {
+       bool pkt_mode; /* TR or packet */
+       bool needs_epib; /* EPIB is needed for the communication or not */
+       u32 psd_size; /* size of Protocol Specific Data */
+       u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
+       u32 hdesc_size; /* Size of a packet descriptor in packet mode */
+       bool notdpkt; /* Suppress sending TDC packet */
+       int remote_thread_id;
+       u32 src_thread;
+       u32 dst_thread;
+       enum psil_endpoint_type ep_type;
+       bool enable_acc32;
+       bool enable_burst;
+       enum udma_tp_level channel_tpl; /* Channel Throughput Level */
+
+       enum dma_transfer_direction dir;
+};
+
+struct udma_chan {
+       struct virt_dma_chan vc;
+       struct dma_slave_config cfg;
+       struct udma_dev *ud;
+       struct udma_desc *desc;
+       struct udma_desc *terminated_desc;
+       struct udma_static_tr static_tr;
+       char *name;
+
+       struct udma_tchan *tchan;
+       struct udma_rchan *rchan;
+       struct udma_rflow *rflow;
+
+       bool psil_paired;
+
+       int irq_num_ring;
+       int irq_num_udma;
+
+       bool cyclic;
+       bool paused;
+
+       enum udma_chan_state state;
+       struct completion teardown_completed;
+
+       struct udma_tx_drain tx_drain;
+
+       u32 bcnt; /* number of bytes completed since the start of the channel */
+       u32 in_ring_cnt; /* number of descriptors in flight */
+
+       /* Channel configuration parameters */
+       struct udma_chan_config config;
+
+       /* dmapool for packet mode descriptors */
+       bool use_dma_pool;
+       struct dma_pool *hdesc_pool;
+
+       u32 id;
+};
+
+static inline struct udma_dev *to_udma_dev(struct dma_device *d)
+{
+       return container_of(d, struct udma_dev, ddev);
+}
+
+static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct udma_chan, vc.chan);
+}
+
+static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
+{
+       return container_of(t, struct udma_desc, vd.tx);
+}
+
+/* Generic register access functions */
+static inline u32 udma_read(void __iomem *base, int reg)
+{
+       return readl(base + reg);
+}
+
+static inline void udma_write(void __iomem *base, int reg, u32 val)
+{
+       writel(val, base + reg);
+}
+
+static inline void udma_update_bits(void __iomem *base, int reg,
+                                   u32 mask, u32 val)
+{
+       u32 tmp, orig;
+
+       orig = readl(base + reg);
+       tmp = orig & ~mask;
+       tmp |= (val & mask);
+
+       if (tmp != orig)
+               writel(tmp, base + reg);
+}
+
+/* TCHANRT */
+static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
+{
+       if (!tchan)
+               return 0;
+       return udma_read(tchan->reg_rt, reg);
+}
+
+static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
+                                     u32 val)
+{
+       if (!tchan)
+               return;
+       udma_write(tchan->reg_rt, reg, val);
+}
+
+static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
+                                           u32 mask, u32 val)
+{
+       if (!tchan)
+               return;
+       udma_update_bits(tchan->reg_rt, reg, mask, val);
+}
+
+/* RCHANRT */
+static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
+{
+       if (!rchan)
+               return 0;
+       return udma_read(rchan->reg_rt, reg);
+}
+
+static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
+                                     u32 val)
+{
+       if (!rchan)
+               return;
+       udma_write(rchan->reg_rt, reg, val);
+}
+
+static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
+                                           u32 mask, u32 val)
+{
+       if (!rchan)
+               return;
+       udma_update_bits(rchan->reg_rt, reg, mask, val);
+}
+
+static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
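+       /* PSI-L destination threads carry the DST offset in their thread ID */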
+       dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+       return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
+                                             tisci_rm->tisci_navss_dev_id,
+                                             src_thread, dst_thread);
+}
+
+static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+                            u32 dst_thread)
+{
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+       dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+       return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
+                                               tisci_rm->tisci_navss_dev_id,
+                                               src_thread, dst_thread);
+}
+
+static void udma_reset_uchan(struct udma_chan *uc)
+{
+       memset(&uc->config, 0, sizeof(uc->config));
+       uc->config.remote_thread_id = -1;
+       uc->state = UDMA_CHAN_IS_IDLE;
+}
+
+static void udma_dump_chan_stdata(struct udma_chan *uc)
+{
+       struct device *dev = uc->ud->dev;
+       u32 offset;
+       int i;
+
+       if (uc->config.dir == DMA_MEM_TO_DEV ||
+           uc->config.dir == DMA_MEM_TO_MEM) {
+               dev_dbg(dev, "TCHAN State data:\n");
+               for (i = 0; i < 32; i++) {
+                       offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
+                       dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
+                               udma_tchanrt_read(uc->tchan, offset));
+               }
+       }
+
+       if (uc->config.dir == DMA_DEV_TO_MEM ||
+           uc->config.dir == DMA_MEM_TO_MEM) {
+               dev_dbg(dev, "RCHAN State data:\n");
+               for (i = 0; i < 32; i++) {
+                       offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
+                       dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
+                               udma_rchanrt_read(uc->rchan, offset));
+               }
+       }
+}
+
+static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
+                                                   int idx)
+{
+       return d->hwdesc[idx].cppi5_desc_paddr;
+}
+
+static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
+{
+       return d->hwdesc[idx].cppi5_desc_vaddr;
+}
+
+static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
+                                                  dma_addr_t paddr)
+{
+       struct udma_desc *d = uc->terminated_desc;
+
+       if (d) {
+               dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+                                                                  d->desc_idx);
+
+               if (desc_paddr != paddr)
+                       d = NULL;
+       }
+
+       if (!d) {
+               d = uc->desc;
+               if (d) {
+                       dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+                                                               d->desc_idx);
+
+                       if (desc_paddr != paddr)
+                               d = NULL;
+               }
+       }
+
+       return d;
+}
+
+static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
+{
+       if (uc->use_dma_pool) {
+               int i;
+
+               for (i = 0; i < d->hwdesc_count; i++) {
+                       if (!d->hwdesc[i].cppi5_desc_vaddr)
+                               continue;
+
+                       dma_pool_free(uc->hdesc_pool,
+                                     d->hwdesc[i].cppi5_desc_vaddr,
+                                     d->hwdesc[i].cppi5_desc_paddr);
+
+                       d->hwdesc[i].cppi5_desc_vaddr = NULL;
+               }
+       } else if (d->hwdesc[0].cppi5_desc_vaddr) {
+               struct udma_dev *ud = uc->ud;
+
+               dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
+                                 d->hwdesc[0].cppi5_desc_vaddr,
+                                 d->hwdesc[0].cppi5_desc_paddr);
+
+               d->hwdesc[0].cppi5_desc_vaddr = NULL;
+       }
+}
+
+static void udma_purge_desc_work(struct work_struct *work)
+{
+       struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
+       struct virt_dma_desc *vd, *_vd;
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&ud->lock, flags);
+       list_splice_tail_init(&ud->desc_to_purge, &head);
+       spin_unlock_irqrestore(&ud->lock, flags);
+
+       list_for_each_entry_safe(vd, _vd, &head, node) {
+               struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+               struct udma_desc *d = to_udma_desc(&vd->tx);
+
+               udma_free_hwdesc(uc, d);
+               list_del(&vd->node);
+               kfree(d);
+       }
+
+       /* If more to purge, schedule the work again */
+       if (!list_empty(&ud->desc_to_purge))
+               schedule_work(&ud->purge_work);
+}
+
+static void udma_desc_free(struct virt_dma_desc *vd)
+{
+       struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
+       struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+       struct udma_desc *d = to_udma_desc(&vd->tx);
+       unsigned long flags;
+
+       if (uc->terminated_desc == d)
+               uc->terminated_desc = NULL;
+
+       if (uc->use_dma_pool) {
+               udma_free_hwdesc(uc, d);
+               kfree(d);
+               return;
+       }
+
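+       /*
+        * Defer the actual hwdesc free to process context via purge_work,
+        * as dma_free_coherent() cannot be used in atomic context
+        */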
+       spin_lock_irqsave(&ud->lock, flags);
+       list_add_tail(&vd->node, &ud->desc_to_purge);
+       spin_unlock_irqrestore(&ud->lock, flags);
+
+       schedule_work(&ud->purge_work);
+}
+
+static bool udma_is_chan_running(struct udma_chan *uc)
+{
+       u32 trt_ctl = 0;
+       u32 rrt_ctl = 0;
+
+       if (uc->tchan)
+               trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+       if (uc->rchan)
+               rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
+
+       if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
+               return true;
+
+       return false;
+}
+
+static bool udma_is_chan_paused(struct udma_chan *uc)
+{
+       u32 val, pause_mask;
+
+       switch (uc->desc->dir) {
+       case DMA_DEV_TO_MEM:
+               val = udma_rchanrt_read(uc->rchan,
+                                       UDMA_RCHAN_RT_PEER_RT_EN_REG);
+               pause_mask = UDMA_PEER_RT_EN_PAUSE;
+               break;
+       case DMA_MEM_TO_DEV:
+               val = udma_tchanrt_read(uc->tchan,
+                                       UDMA_TCHAN_RT_PEER_RT_EN_REG);
+               pause_mask = UDMA_PEER_RT_EN_PAUSE;
+               break;
+       case DMA_MEM_TO_MEM:
+               val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+               pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
+               break;
+       default:
+               return false;
+       }
+
+       if (val & pause_mask)
+               return true;
+
+       return false;
+}
+
+static void udma_sync_for_device(struct udma_chan *uc, int idx)
+{
+       struct udma_desc *d = uc->desc;
+
+       if (uc->cyclic && uc->config.pkt_mode) {
+               dma_sync_single_for_device(uc->ud->dev,
+                                          d->hwdesc[idx].cppi5_desc_paddr,
+                                          d->hwdesc[idx].cppi5_desc_size,
+                                          DMA_TO_DEVICE);
+       } else {
+               int i;
+
+               for (i = 0; i < d->hwdesc_count; i++) {
+                       if (!d->hwdesc[i].cppi5_desc_vaddr)
+                               continue;
+
+                       dma_sync_single_for_device(uc->ud->dev,
+                                               d->hwdesc[i].cppi5_desc_paddr,
+                                               d->hwdesc[i].cppi5_desc_size,
+                                               DMA_TO_DEVICE);
+               }
+       }
+}
+
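+/*
+ * Hand the idx-th hardware descriptor of the current udma_desc over to
+ * the hardware: sync its memory for the device, then push its DMA
+ * address to the free descriptor ring (RX) or transmit ring (TX/MEM).
+ */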
+static int udma_push_to_ring(struct udma_chan *uc, int idx)
+{
+       struct udma_desc *d = uc->desc;
+       struct k3_ring *ring = NULL;
+       int ret = -EINVAL;
+
+       switch (uc->config.dir) {
+       case DMA_DEV_TO_MEM:
+               ring = uc->rflow->fd_ring;
+               break;
+       case DMA_MEM_TO_DEV:
+       case DMA_MEM_TO_MEM:
+               ring = uc->tchan->t_ring;
+               break;
+       default:
+               break;
+       }
+
+       if (ring) {
+               dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);
+
+               wmb(); /* Ensure that writes are not moved over this point */
+               udma_sync_for_device(uc, idx);
+               ret = k3_ringacc_ring_push(ring, &desc_addr);
+               uc->in_ring_cnt++;
+       }
+
+       return ret;
+}
+
+static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
+{
+       struct k3_ring *ring = NULL;
+       int ret = -ENOENT;
+
+       switch (uc->config.dir) {
+       case DMA_DEV_TO_MEM:
+               ring = uc->rflow->r_ring;
+               break;
+       case DMA_MEM_TO_DEV:
+       case DMA_MEM_TO_MEM:
+               ring = uc->tchan->tc_ring;
+               break;
+       default:
+               break;
+       }
+
+       if (ring && k3_ringacc_ring_get_occ(ring)) {
+               struct udma_desc *d = NULL;
+
+               ret = k3_ringacc_ring_pop(ring, addr);
+               if (ret)
+                       return ret;
+
+               /* Teardown completion */
+               if (cppi5_desc_is_tdcm(*addr))
+                       return ret;
+
+               d = udma_udma_desc_from_paddr(uc, *addr);
+
+               if (d)
+                       dma_sync_single_for_cpu(uc->ud->dev, *addr,
+                                               d->hwdesc[0].cppi5_desc_size,
+                                               DMA_FROM_DEVICE);
+               rmb(); /* Ensure that reads are not moved before this point */
+
+               if (!ret)
+                       uc->in_ring_cnt--;
+       }
+
+       return ret;
+}
+
+static void udma_reset_rings(struct udma_chan *uc)
+{
+       struct k3_ring *ring1 = NULL;
+       struct k3_ring *ring2 = NULL;
+
+       switch (uc->config.dir) {
+       case DMA_DEV_TO_MEM:
+               if (uc->rchan) {
+                       ring1 = uc->rflow->fd_ring;
+                       ring2 = uc->rflow->r_ring;
+               }
+               break;
+       case DMA_MEM_TO_DEV:
+       case DMA_MEM_TO_MEM:
+               if (uc->tchan) {
+                       ring1 = uc->tchan->t_ring;
+                       ring2 = uc->tchan->tc_ring;
+               }
+               break;
+       default:
+               break;
+       }
+
+       if (ring1)
+               k3_ringacc_ring_reset_dma(ring1,
+                                         k3_ringacc_ring_get_occ(ring1));
+       if (ring2)
+               k3_ringacc_ring_reset(ring2);
+
+       /* make sure we are not leaking memory via a stalled descriptor */
+       if (uc->terminated_desc) {
+               udma_desc_free(&uc->terminated_desc->vd);
+               uc->terminated_desc = NULL;
+       }
+
+       uc->in_ring_cnt = 0;
+}
+
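+/*
+ * Clear the channel real-time counters. The RT counter registers
+ * decrement by the value written to them, so writing back the value
+ * just read zeroes each counter.
+ */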
+static void udma_reset_counters(struct udma_chan *uc)
+{
+       u32 val;
+
+       if (uc->tchan) {
+               val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
+
+               val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
+
+               val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
+
+               val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
+       }
+
+       if (uc->rchan) {
+               val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
+
+               val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
+
+               val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
+
+               val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
+       }
+
+       uc->bcnt = 0;
+}
+
+static int udma_reset_chan(struct udma_chan *uc, bool hard)
+{
+       switch (uc->config.dir) {
+       case DMA_DEV_TO_MEM:
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+               break;
+       case DMA_MEM_TO_DEV:
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+               break;
+       case DMA_MEM_TO_MEM:
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Reset all counters */
+       udma_reset_counters(uc);
+
+       /*
+        * Hard reset: free and re-allocate the channel resources to bring
+        * it back to a known state.
+        */
+       if (hard) {
+               struct udma_chan_config ucc_backup;
+               int ret;
+
+               memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
+               uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
+
+               /* restore the channel configuration */
+               memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
+               ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
+               if (ret)
+                       return ret;
+
+               /*
+                * Setting forced teardown after the forced reset helps to
+                * recover the rchan.
+                */
+               if (uc->config.dir == DMA_DEV_TO_MEM)
+                       udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+                                          UDMA_CHAN_RT_CTL_EN |
+                                          UDMA_CHAN_RT_CTL_TDOWN |
+                                          UDMA_CHAN_RT_CTL_FTDOWN);
+       }
+       uc->state = UDMA_CHAN_IS_IDLE;
+
+       return 0;
+}
+
+static void udma_start_desc(struct udma_chan *uc)
+{
+       struct udma_chan_config *ucc = &uc->config;
+
+       if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
+               int i;
+
+               /* Push all descriptors to ring for packet mode cyclic or RX */
+               for (i = 0; i < uc->desc->sglen; i++)
+                       udma_push_to_ring(uc, i);
+       } else {
+               udma_push_to_ring(uc, 0);
+       }
+}
+
+static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
+{
+       /* Only PDMAs have staticTR */
+       if (uc->config.ep_type == PSIL_EP_NATIVE)
+               return false;
+
+       /* Check if the staticTR configuration has changed for TX */
+       if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
+               return true;
+
+       return false;
+}
+
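+/*
+ * Take the next descriptor from the virt-dma queue and start it: push
+ * it to the ring and, unless the channel is already running with an
+ * unchanged staticTR configuration, (re)program the peer staticTR and
+ * the RT enable registers for the transfer direction.
+ */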
+static int udma_start(struct udma_chan *uc)
+{
+       struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
+
+       if (!vd) {
+               uc->desc = NULL;
+               return -ENOENT;
+       }
+
+       list_del(&vd->node);
+
+       uc->desc = to_udma_desc(&vd->tx);
+
+       /* Channel is already running and does not need reconfiguration */
+       if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
+               udma_start_desc(uc);
+               goto out;
+       }
+
+       /* Make sure that we clear the teardown bit, if it is set */
+       udma_reset_chan(uc, false);
+
+       /* Push descriptors before we start the channel */
+       udma_start_desc(uc);
+
+       switch (uc->desc->dir) {
+       case DMA_DEV_TO_MEM:
+               /* Config remote TR */
+               if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+                       u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+                                 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+                       const struct udma_match_data *match_data =
+                                                       uc->ud->match_data;
+
+                       if (uc->config.enable_acc32)
+                               val |= PDMA_STATIC_TR_XY_ACC32;
+                       if (uc->config.enable_burst)
+                               val |= PDMA_STATIC_TR_XY_BURST;
+
+                       udma_rchanrt_write(uc->rchan,
+                               UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+                       udma_rchanrt_write(uc->rchan,
+                               UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
+                               PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
+                                                match_data->statictr_z_mask));
+
+                       /* save the current staticTR configuration */
+                       memcpy(&uc->static_tr, &uc->desc->static_tr,
+                              sizeof(uc->static_tr));
+               }
+
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN);
+
+               /* Enable remote */
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                                  UDMA_PEER_RT_EN_ENABLE);
+
+               break;
+       case DMA_MEM_TO_DEV:
+               /* Config remote TR */
+               if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+                       u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+                                 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+
+                       if (uc->config.enable_acc32)
+                               val |= PDMA_STATIC_TR_XY_ACC32;
+                       if (uc->config.enable_burst)
+                               val |= PDMA_STATIC_TR_XY_BURST;
+
+                       udma_tchanrt_write(uc->tchan,
+                               UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+                       /* save the current staticTR configuration */
+                       memcpy(&uc->static_tr, &uc->desc->static_tr,
+                              sizeof(uc->static_tr));
+               }
+
+               /* Enable remote */
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+                                  UDMA_PEER_RT_EN_ENABLE);
+
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN);
+
+               break;
+       case DMA_MEM_TO_MEM:
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN);
+
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       uc->state = UDMA_CHAN_IS_ACTIVE;
+out:
+
+       return 0;
+}
+
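+/*
+ * Initiate a graceful channel teardown. Depending on the direction the
+ * teardown is requested at the remote peer (RX), at the peer plus the
+ * tchan with flush (TX), or at the tchan alone (MEM_TO_MEM). Completion
+ * is signalled by a teardown completion message (TDCM) arriving on the
+ * completion ring, which is handled in udma_ring_irq_handler().
+ */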
+static int udma_stop(struct udma_chan *uc)
+{
+       enum udma_chan_state old_state = uc->state;
+
+       uc->state = UDMA_CHAN_IS_TERMINATING;
+       reinit_completion(&uc->teardown_completed);
+
+       switch (uc->config.dir) {
+       case DMA_DEV_TO_MEM:
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                                  UDMA_PEER_RT_EN_ENABLE |
+                                  UDMA_PEER_RT_EN_TEARDOWN);
+               break;
+       case DMA_MEM_TO_DEV:
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+                                  UDMA_PEER_RT_EN_ENABLE |
+                                  UDMA_PEER_RT_EN_FLUSH);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN |
+                                  UDMA_CHAN_RT_CTL_TDOWN);
+               break;
+       case DMA_MEM_TO_MEM:
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN |
+                                  UDMA_CHAN_RT_CTL_TDOWN);
+               break;
+       default:
+               uc->state = old_state;
+               complete_all(&uc->teardown_completed);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
+{
+       struct udma_desc *d = uc->desc;
+       struct cppi5_host_desc_t *h_desc;
+
+       h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
+       cppi5_hdesc_reset_to_original(h_desc);
+       udma_push_to_ring(uc, d->desc_idx);
+       d->desc_idx = (d->desc_idx + 1) % d->sglen;
+}
+
+static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
+{
+       struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+       memcpy(d->metadata, h_desc->epib, d->metadata_size);
+}
+
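+/*
+ * For TX towards a PDMA peripheral, a returned descriptor only means
+ * that UDMA has pushed the data out; the peer may still be draining it.
+ * Compare the peer byte counter against our own to decide whether the
+ * transfer has really completed on the wire.
+ */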
+static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
+{
+       u32 peer_bcnt, bcnt;
+
+       /* Only TX towards PDMA is affected */
+       if (uc->config.ep_type == PSIL_EP_NATIVE ||
+           uc->config.dir != DMA_MEM_TO_DEV)
+               return true;
+
+       peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+       bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+
+       if (peer_bcnt < bcnt) {
+               uc->tx_drain.residue = bcnt - peer_bcnt;
+               uc->tx_drain.jiffie = jiffies;
+               return false;
+       }
+
+       return true;
+}
+
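+/*
+ * Delayed work polling the TX drain progress. The next check is
+ * scheduled based on the drain rate observed between two polls; if no
+ * progress was made, the check is retried after one second.
+ */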
+static void udma_check_tx_completion(struct work_struct *work)
+{
+       struct udma_chan *uc = container_of(work, typeof(*uc),
+                                           tx_drain.work.work);
+       bool desc_done = true;
+       u32 residue_diff;
+       unsigned long jiffie_diff, delay;
+
+       if (uc->desc) {
+               residue_diff = uc->tx_drain.residue;
+               jiffie_diff = uc->tx_drain.jiffie;
+               desc_done = udma_is_desc_really_done(uc, uc->desc);
+       }
+
+       if (!desc_done) {
+               jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
+               residue_diff -= uc->tx_drain.residue;
+               if (residue_diff) {
+                       /* Try to guess when we should check next time */
+                       residue_diff /= jiffie_diff;
+                       delay = uc->tx_drain.residue / residue_diff / 3;
+                       if (jiffies_to_msecs(delay) < 5)
+                               delay = 0;
+               } else {
+                       /* No progress, check again in 1 second */
+                       delay = HZ;
+               }
+
+               schedule_delayed_work(&uc->tx_drain.work, delay);
+       } else if (uc->desc) {
+               struct udma_desc *d = uc->desc;
+
+               uc->bcnt += d->residue;
+               udma_start(uc);
+               vchan_cookie_complete(&d->vd);
+       }
+}
+
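+/*
+ * Completion ring interrupt: pop the returned descriptor and either
+ * complete it, re-arm it (cyclic), or schedule the TX drain check if
+ * the peer has not yet consumed all of the data. A TDCM address on the
+ * ring signals that a channel teardown has finished.
+ */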
+static irqreturn_t udma_ring_irq_handler(int irq, void *data)
+{
+       struct udma_chan *uc = data;
+       struct udma_desc *d;
+       unsigned long flags;
+       dma_addr_t paddr = 0;
+
+       if (udma_pop_from_ring(uc, &paddr) || !paddr)
+               return IRQ_HANDLED;
+
+       spin_lock_irqsave(&uc->vc.lock, flags);
+
+       /* Teardown completion message */
+       if (cppi5_desc_is_tdcm(paddr)) {
+               /* Compensate our internal pop/push counter */
+               uc->in_ring_cnt++;
+
+               complete_all(&uc->teardown_completed);
+
+               if (uc->terminated_desc) {
+                       udma_desc_free(&uc->terminated_desc->vd);
+                       uc->terminated_desc = NULL;
+               }
+
+               if (!uc->desc)
+                       udma_start(uc);
+
+               goto out;
+       }
+
+       d = udma_udma_desc_from_paddr(uc, paddr);
+
+       if (d) {
+               dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+                                                                  d->desc_idx);
+               if (desc_paddr != paddr) {
+                       dev_err(uc->ud->dev, "not matching descriptors!\n");
+                       goto out;
+               }
+
+               if (uc->cyclic) {
+                       /* push the descriptor back to the ring */
+                       if (d == uc->desc) {
+                               udma_cyclic_packet_elapsed(uc);
+                               vchan_cyclic_callback(&d->vd);
+                       }
+               } else {
+                       bool desc_done = false;
+
+                       if (d == uc->desc) {
+                               desc_done = udma_is_desc_really_done(uc, d);
+
+                               if (desc_done) {
+                                       uc->bcnt += d->residue;
+                                       udma_start(uc);
+                               } else {
+                                       schedule_delayed_work(&uc->tx_drain.work,
+                                                             0);
+                               }
+                       }
+
+                       if (desc_done)
+                               vchan_cookie_complete(&d->vd);
+               }
+       }
+out:
+       spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t udma_udma_irq_handler(int irq, void *data)
+{
+       struct udma_chan *uc = data;
+       struct udma_desc *d;
+       unsigned long flags;
+
+       spin_lock_irqsave(&uc->vc.lock, flags);
+       d = uc->desc;
+       if (d) {
+               d->tr_idx = (d->tr_idx + 1) % d->sglen;
+
+               if (uc->cyclic) {
+                       vchan_cyclic_callback(&d->vd);
+               } else {
+                       /* TODO: figure out the real amount of data */
+                       uc->bcnt += d->residue;
+                       udma_start(uc);
+                       vchan_cookie_complete(&d->vd);
+               }
+       }
+
+       spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
+ * @ud: UDMA device
+ * @from: Start the search from this flow id number
+ * @cnt: Number of consecutive flow ids to allocate
+ *
+ * Allocate a range of RX flow ids for future use; these flows can be requested
+ * only by explicit flow id number. If @from is set to -1 it will try to find
+ * the first free range. If @from is a positive value it will force allocation
+ * only of the specified range of flows.
+ *
+ * Returns -ENOMEM if a free range can't be found,
+ * -EEXIST if the requested range is busy,
+ * -EINVAL if wrong input values are passed.
+ * Returns the flow id on success.
+ */
+static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+       int start, tmp_from;
+       DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+       tmp_from = from;
+       if (tmp_from < 0)
+               tmp_from = ud->rchan_cnt;
+       /* default flows can't be allocated; they are accessible only by id */
+       if (tmp_from < ud->rchan_cnt)
+               return -EINVAL;
+
+       if (tmp_from + cnt > ud->rflow_cnt)
+               return -EINVAL;
+
+       bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
+                 ud->rflow_cnt);
+
+       start = bitmap_find_next_zero_area(tmp,
+                                          ud->rflow_cnt,
+                                          tmp_from, cnt, 0);
+       if (start >= ud->rflow_cnt)
+               return -ENOMEM;
+
+       if (from >= 0 && start != from)
+               return -EEXIST;
+
+       bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
+       return start;
+}
+
+static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+       if (from < ud->rchan_cnt)
+               return -EINVAL;
+       if (from + cnt > ud->rflow_cnt)
+               return -EINVAL;
+
+       bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
+       return 0;
+}
+
+static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
+{
+       /*
+        * An rflow can be requested by ID for any rflow that is not in use,
+        * on the assumption that the caller knows what it is doing. TI-SCI
+        * FW will perform an additional permission check anyway, so it's
+        * safe.
+        */
+
+       if (id < 0 || id >= ud->rflow_cnt)
+               return ERR_PTR(-ENOENT);
+
+       if (test_bit(id, ud->rflow_in_use))
+               return ERR_PTR(-ENOENT);
+
+       /* GP rflow has to be allocated first */
+       if (!test_bit(id, ud->rflow_gp_map) &&
+           !test_bit(id, ud->rflow_gp_map_allocated))
+               return ERR_PTR(-EINVAL);
+
+       dev_dbg(ud->dev, "get rflow%d\n", id);
+       set_bit(id, ud->rflow_in_use);
+       return &ud->rflows[id];
+}
+
+static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+       if (!test_bit(rflow->id, ud->rflow_in_use)) {
+               dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+               return;
+       }
+
+       dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+       clear_bit(rflow->id, ud->rflow_in_use);
+}
+
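+/*
+ * Generates __udma_reserve_tchan()/__udma_reserve_rchan(). A
+ * non-negative @id claims that exact channel; otherwise the search for
+ * a free channel starts at the first index of the requested throughput
+ * level (clamped to the last level the device defines).
+ */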
+#define UDMA_RESERVE_RESOURCE(res)                                     \
+static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,    \
+                                              enum udma_tp_level tpl,  \
+                                              int id)                  \
+{                                                                      \
+       if (id >= 0) {                                                  \
+               if (test_bit(id, ud->res##_map)) {                      \
+                       dev_err(ud->dev, #res "%d is in use\n", id);    \
+                       return ERR_PTR(-ENOENT);                        \
+               }                                                       \
+       } else {                                                        \
+               int start;                                              \
+                                                                       \
+               if (tpl >= ud->match_data->tpl_levels)                  \
+                       tpl = ud->match_data->tpl_levels - 1;           \
+                                                                       \
+               start = ud->match_data->level_start_idx[tpl];           \
+                                                                       \
+               id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,   \
+                                       start);                         \
+               if (id == ud->res##_cnt) {                              \
+                       return ERR_PTR(-ENOENT);                        \
+               }                                                       \
+       }                                                               \
+                                                                       \
+       set_bit(id, ud->res##_map);                                     \
+       return &ud->res##s[id];                                         \
+}
+
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
+
+static int udma_get_tchan(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (uc->tchan) {
+               dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+                       uc->id, uc->tchan->id);
+               return 0;
+       }
+
+       uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+       if (IS_ERR(uc->tchan))
+               return PTR_ERR(uc->tchan);
+
+       return 0;
+}
+
+static int udma_get_rchan(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (uc->rchan) {
+               dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
+                       uc->id, uc->rchan->id);
+               return 0;
+       }
+
+       uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+       if (IS_ERR(uc->rchan))
+               return PTR_ERR(uc->rchan);
+
+       return 0;
+}
+
+static int udma_get_chan_pair(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+       const struct udma_match_data *match_data = ud->match_data;
+       int chan_id, end;
+
+       if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
+               dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
+                        uc->id, uc->tchan->id);
+               return 0;
+       }
+
+       if (uc->tchan) {
+               dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
+                       uc->id, uc->tchan->id);
+               return -EBUSY;
+       } else if (uc->rchan) {
+               dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
+                       uc->id, uc->rchan->id);
+               return -EBUSY;
+       }
+
+       /* Can be optimized, but let's have it like this for now */
+       end = min(ud->tchan_cnt, ud->rchan_cnt);
+       /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
+       chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
+       for (; chan_id < end; chan_id++) {
+               if (!test_bit(chan_id, ud->tchan_map) &&
+                   !test_bit(chan_id, ud->rchan_map))
+                       break;
+       }
+
+       if (chan_id == end)
+               return -ENOENT;
+
+       set_bit(chan_id, ud->tchan_map);
+       set_bit(chan_id, ud->rchan_map);
+       uc->tchan = &ud->tchans[chan_id];
+       uc->rchan = &ud->rchans[chan_id];
+
+       return 0;
+}
+
+static int udma_get_rflow(struct udma_chan *uc, int flow_id)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (!uc->rchan) {
+               dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
+               return -EINVAL;
+       }
+
+       if (uc->rflow) {
+               dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
+                       uc->id, uc->rflow->id);
+               return 0;
+       }
+
+       uc->rflow = __udma_get_rflow(ud, flow_id);
+       if (IS_ERR(uc->rflow))
+               return PTR_ERR(uc->rflow);
+
+       return 0;
+}
+
+static void udma_put_rchan(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (uc->rchan) {
+               dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
+                       uc->rchan->id);
+               clear_bit(uc->rchan->id, ud->rchan_map);
+               uc->rchan = NULL;
+       }
+}
+
+static void udma_put_tchan(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (uc->tchan) {
+               dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
+                       uc->tchan->id);
+               clear_bit(uc->tchan->id, ud->tchan_map);
+               uc->tchan = NULL;
+       }
+}
+
+static void udma_put_rflow(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (uc->rflow) {
+               dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
+                       uc->rflow->id);
+               __udma_put_rflow(ud, uc->rflow);
+               uc->rflow = NULL;
+       }
+}
+
+static void udma_free_tx_resources(struct udma_chan *uc)
+{
+       if (!uc->tchan)
+               return;
+
+       k3_ringacc_ring_free(uc->tchan->t_ring);
+       k3_ringacc_ring_free(uc->tchan->tc_ring);
+       uc->tchan->t_ring = NULL;
+       uc->tchan->tc_ring = NULL;
+
+       udma_put_tchan(uc);
+}
+
+static int udma_alloc_tx_resources(struct udma_chan *uc)
+{
+       struct k3_ring_cfg ring_cfg;
+       struct udma_dev *ud = uc->ud;
+       int ret;
+
+       ret = udma_get_tchan(uc);
+       if (ret)
+               return ret;
+
+       uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
+                                                   uc->tchan->id, 0);
+       if (!uc->tchan->t_ring) {
+               ret = -EBUSY;
+               goto err_tx_ring;
+       }
+
+       uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+       if (!uc->tchan->tc_ring) {
+               ret = -EBUSY;
+               goto err_txc_ring;
+       }
+
+       memset(&ring_cfg, 0, sizeof(ring_cfg));
+       ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+       ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+       ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+       ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
+       ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+
+       if (ret)
+               goto err_ringcfg;
+
+       return 0;
+
+err_ringcfg:
+       k3_ringacc_ring_free(uc->tchan->tc_ring);
+       uc->tchan->tc_ring = NULL;
+err_txc_ring:
+       k3_ringacc_ring_free(uc->tchan->t_ring);
+       uc->tchan->t_ring = NULL;
+err_tx_ring:
+       udma_put_tchan(uc);
+
+       return ret;
+}
+
+static void udma_free_rx_resources(struct udma_chan *uc)
+{
+       if (!uc->rchan)
+               return;
+
+       if (uc->rflow) {
+               struct udma_rflow *rflow = uc->rflow;
+
+               k3_ringacc_ring_free(rflow->fd_ring);
+               k3_ringacc_ring_free(rflow->r_ring);
+               rflow->fd_ring = NULL;
+               rflow->r_ring = NULL;
+
+               udma_put_rflow(uc);
+       }
+
+       udma_put_rchan(uc);
+}
+
+static int udma_alloc_rx_resources(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+       struct k3_ring_cfg ring_cfg;
+       struct udma_rflow *rflow;
+       int fd_ring_id;
+       int ret;
+
+       ret = udma_get_rchan(uc);
+       if (ret)
+               return ret;
+
+       /* For MEM_TO_MEM we don't need rflow or rings */
+       if (uc->config.dir == DMA_MEM_TO_MEM)
+               return 0;
+
+       ret = udma_get_rflow(uc, uc->rchan->id);
+       if (ret) {
+               ret = -EBUSY;
+               goto err_rflow;
+       }
+
+       rflow = uc->rflow;
+       fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
+       rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
+       if (!rflow->fd_ring) {
+               ret = -EBUSY;
+               goto err_rx_ring;
+       }
+
+       rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+       if (!rflow->r_ring) {
+               ret = -EBUSY;
+               goto err_rxc_ring;
+       }
+
+       memset(&ring_cfg, 0, sizeof(ring_cfg));
+
+       if (uc->config.pkt_mode)
+               ring_cfg.size = SG_MAX_SEGMENTS;
+       else
+               ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+
+       ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+       ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+       ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+       ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+       ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
+
+       if (ret)
+               goto err_ringcfg;
+
+       return 0;
+
+err_ringcfg:
+       k3_ringacc_ring_free(rflow->r_ring);
+       rflow->r_ring = NULL;
+err_rxc_ring:
+       k3_ringacc_ring_free(rflow->fd_ring);
+       rflow->fd_ring = NULL;
+err_rx_ring:
+       udma_put_rflow(uc);
+err_rflow:
+       udma_put_rchan(uc);
+
+       return ret;
+}
+
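+/*
+ * Bitmasks telling the TI-SCI firmware which fields of the channel
+ * configuration request messages below are valid and should be applied.
+ */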
+#define TISCI_TCHAN_VALID_PARAMS (                             \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |      \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |    \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |      \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
+
+#define TISCI_RCHAN_VALID_PARAMS (                             \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |    \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |     \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |    \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
+
+static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+       const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+       struct udma_tchan *tchan = uc->tchan;
+       struct udma_rchan *rchan = uc->rchan;
+       int ret = 0;
+
+       /* Non synchronized - mem to mem type of transfer */
+       int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+       struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+       struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+
+       req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+       req_tx.nav_id = tisci_rm->tisci_dev_id;
+       req_tx.index = tchan->id;
+       req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+       req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+       req_tx.txcq_qnum = tc_ring;
+
+       ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+       if (ret) {
+               dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+               return ret;
+       }
+
+       req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+       req_rx.nav_id = tisci_rm->tisci_dev_id;
+       req_rx.index = rchan->id;
+       req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+       req_rx.rxcq_qnum = tc_ring;
+       req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+
+       ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+       if (ret)
+               dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
+
+       return ret;
+}
+
+static int udma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+       const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+       struct udma_tchan *tchan = uc->tchan;
+       int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+       struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+       u32 mode, fetch_size;
+       int ret = 0;
+
+       if (uc->config.pkt_mode) {
+               mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+               fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+                                                  uc->config.psd_size, 0);
+       } else {
+               mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+               fetch_size = sizeof(struct cppi5_desc_hdr_t);
+       }
+
+       req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+       req_tx.nav_id = tisci_rm->tisci_dev_id;
+       req_tx.index = tchan->id;
+       req_tx.tx_chan_type = mode;
+       req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+       req_tx.tx_fetch_size = fetch_size >> 2;
+       req_tx.txcq_qnum = tc_ring;
+
+       ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+       if (ret)
+               dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+       return ret;
+}
+
+static int udma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+       const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+       struct udma_rchan *rchan = uc->rchan;
+       int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
+       int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+       struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+       struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+       u32 mode, fetch_size;
+       int ret = 0;
+
+       if (uc->config.pkt_mode) {
+               mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+               fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+                                                  uc->config.psd_size, 0);
+       } else {
+               mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+               fetch_size = sizeof(struct cppi5_desc_hdr_t);
+       }
+
+       req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+       req_rx.nav_id = tisci_rm->tisci_dev_id;
+       req_rx.index = rchan->id;
+       req_rx.rx_fetch_size = fetch_size >> 2;
+       req_rx.rxcq_qnum = rx_ring;
+       req_rx.rx_chan_type = mode;
+
+       ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+       if (ret) {
+               dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+               return ret;
+       }
+
+       flow_req.valid_params =
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+
+       flow_req.nav_id = tisci_rm->tisci_dev_id;
+       flow_req.flow_index = rchan->id;
+
+       if (uc->config.needs_epib)
+               flow_req.rx_einfo_present = 1;
+       else
+               flow_req.rx_einfo_present = 0;
+       if (uc->config.psd_size)
+               flow_req.rx_psinfo_present = 1;
+       else
+               flow_req.rx_psinfo_present = 0;
+       flow_req.rx_error_handling = 1;
+       flow_req.rx_dest_qnum = rx_ring;
+       flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
+       flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
+       flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
+       flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
+       flow_req.rx_fdq0_sz0_qnum = fd_ring;
+       flow_req.rx_fdq1_qnum = fd_ring;
+       flow_req.rx_fdq2_qnum = fd_ring;
+       flow_req.rx_fdq3_qnum = fd_ring;
+
+       ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+       if (ret)
+               dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
+
+       return ret;
+}
+
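+/*
+ * Channel setup: reserve the tchan/rchan (plus rflow and rings) needed
+ * for the configured direction, configure the channel via TI-SCI, pair
+ * the PSI-L threads and request the ring interrupt (and the UDMA TR
+ * event interrupt for slave TR mode channels).
+ */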
+static int udma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       struct udma_dev *ud = to_udma_dev(chan->device);
+       const struct udma_match_data *match_data = ud->match_data;
+       struct k3_ring *irq_ring;
+       u32 irq_udma_idx;
+       int ret;
+
+       if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
+               uc->use_dma_pool = true;
+               /* in case of MEM_TO_MEM we have a maximum of two TRs */
+               if (uc->config.dir == DMA_MEM_TO_MEM) {
+                       uc->config.hdesc_size = cppi5_trdesc_calc_size(
+                                       sizeof(struct cppi5_tr_type15_t), 2);
+                       uc->config.pkt_mode = false;
+               }
+       }
+
+       if (uc->use_dma_pool) {
+               uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+                                                uc->config.hdesc_size,
+                                                ud->desc_align,
+                                                0);
+               if (!uc->hdesc_pool) {
+                       dev_err(ud->ddev.dev,
+                               "Descriptor pool allocation failed\n");
+                       uc->use_dma_pool = false;
+                       return -ENOMEM;
+               }
+       }
+
+       /*
+        * Make sure that the completion is in a known state:
+        * No teardown, the channel is idle
+        */
+       reinit_completion(&uc->teardown_completed);
+       complete_all(&uc->teardown_completed);
+       uc->state = UDMA_CHAN_IS_IDLE;
+
+       switch (uc->config.dir) {
+       case DMA_MEM_TO_MEM:
+               /* Non synchronized - mem to mem type of transfer */
+               dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+                       uc->id);
+
+               ret = udma_get_chan_pair(uc);
+               if (ret)
+                       return ret;
+
+               ret = udma_alloc_tx_resources(uc);
+               if (ret)
+                       return ret;
+
+               ret = udma_alloc_rx_resources(uc);
+               if (ret) {
+                       udma_free_tx_resources(uc);
+                       return ret;
+               }
+
+               uc->config.src_thread = ud->psil_base + uc->tchan->id;
+               uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+                                       K3_PSIL_DST_THREAD_ID_OFFSET;
+
+               irq_ring = uc->tchan->tc_ring;
+               irq_udma_idx = uc->tchan->id;
+
+               ret = udma_tisci_m2m_channel_config(uc);
+               break;
+       case DMA_MEM_TO_DEV:
+               /* Slave transfer synchronized - mem to dev (TX) transfer */
+               dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+                       uc->id);
+
+               ret = udma_alloc_tx_resources(uc);
+               if (ret) {
+                       uc->config.remote_thread_id = -1;
+                       return ret;
+               }
+
+               uc->config.src_thread = ud->psil_base + uc->tchan->id;
+               uc->config.dst_thread = uc->config.remote_thread_id;
+               uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+               irq_ring = uc->tchan->tc_ring;
+               irq_udma_idx = uc->tchan->id;
+
+               ret = udma_tisci_tx_channel_config(uc);
+               break;
+       case DMA_DEV_TO_MEM:
+               /* Slave transfer synchronized - dev to mem (RX) transfer */
+               dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+                       uc->id);
+
+               ret = udma_alloc_rx_resources(uc);
+               if (ret) {
+                       uc->config.remote_thread_id = -1;
+                       return ret;
+               }
+
+               uc->config.src_thread = uc->config.remote_thread_id;
+               uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+                                       K3_PSIL_DST_THREAD_ID_OFFSET;
+
+               irq_ring = uc->rflow->r_ring;
+               irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+
+               ret = udma_tisci_rx_channel_config(uc);
+               break;
+       default:
+               /* Cannot happen */
+               dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+                       __func__, uc->id, uc->config.dir);
+               return -EINVAL;
+       }
+
+       /* check if the channel configuration was successful */
+       if (ret)
+               goto err_res_free;
+
+       if (udma_is_chan_running(uc)) {
+               dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+               udma_stop(uc);
+               if (udma_is_chan_running(uc)) {
+                       dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+                       goto err_res_free;
+               }
+       }
+
+       /* PSI-L pairing */
+       ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+       if (ret) {
+               dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+                       uc->config.src_thread, uc->config.dst_thread);
+               goto err_res_free;
+       }
+
+       uc->psil_paired = true;
+
+       uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
+       if (uc->irq_num_ring <= 0) {
+               dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+                       k3_ringacc_get_ring_id(irq_ring));
+               ret = -EINVAL;
+               goto err_psi_free;
+       }
+
+       ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+                         IRQF_TRIGGER_HIGH, uc->name, uc);
+       if (ret) {
+               dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+               goto err_irq_free;
+       }
+
+       /* Event from UDMA (TR events) only needed for slave TR mode channels */
+       if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
+               uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
+                                                           irq_udma_idx);
+               if (uc->irq_num_udma <= 0) {
+                       dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
+                               irq_udma_idx);
+                       free_irq(uc->irq_num_ring, uc);
+                       ret = -EINVAL;
+                       goto err_irq_free;
+               }
+
+               ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
+                                 uc->name, uc);
+               if (ret) {
+                       dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
+                               uc->id);
+                       free_irq(uc->irq_num_ring, uc);
+                       goto err_irq_free;
+               }
+       } else {
+               uc->irq_num_udma = 0;
+       }
+
+       udma_reset_rings(uc);
+
+       INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+                                 udma_check_tx_completion);
+       return 0;
+
+err_irq_free:
+       uc->irq_num_ring = 0;
+       uc->irq_num_udma = 0;
+err_psi_free:
+       navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
+       uc->psil_paired = false;
+err_res_free:
+       udma_free_tx_resources(uc);
+       udma_free_rx_resources(uc);
+
+       udma_reset_uchan(uc);
+
+       if (uc->use_dma_pool) {
+               dma_pool_destroy(uc->hdesc_pool);
+               uc->use_dma_pool = false;
+       }
+
+       return ret;
+}
+
+static int udma_slave_config(struct dma_chan *chan,
+                            struct dma_slave_config *cfg)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+
+       memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
+
+       return 0;
+}
+
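+/*
+ * Allocate a TR mode descriptor: a single CPPI5 TR descriptor header
+ * followed by @tr_count TR request records of @tr_size bytes and the
+ * matching TR response array, hence the tr_req_base and tr_resp_base
+ * offsets computed below.
+ */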
+static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
+                                           size_t tr_size, int tr_count,
+                                           enum dma_transfer_direction dir)
+{
+       struct udma_hwdesc *hwdesc;
+       struct cppi5_desc_hdr_t *tr_desc;
+       struct udma_desc *d;
+       u32 reload_count = 0;
+       u32 ring_id;
+
+       switch (tr_size) {
+       case 16:
+       case 32:
+       case 64:
+       case 128:
+               break;
+       default:
+               dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
+               return NULL;
+       }
+
+       /* We have only one descriptor containing multiple TRs */
+       d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
+       if (!d)
+               return NULL;
+
+       d->sglen = tr_count;
+
+       d->hwdesc_count = 1;
+       hwdesc = &d->hwdesc[0];
+
+       /* Allocate memory for DMA ring descriptor */
+       if (uc->use_dma_pool) {
+               hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+               hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+                                               GFP_NOWAIT,
+                                               &hwdesc->cppi5_desc_paddr);
+       } else {
+               hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
+                                                                tr_count);
+               hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
+                                               uc->ud->desc_align);
+               hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
+                                               hwdesc->cppi5_desc_size,
+                                               &hwdesc->cppi5_desc_paddr,
+                                               GFP_NOWAIT);
+       }
+
+       if (!hwdesc->cppi5_desc_vaddr) {
+               kfree(d);
+               return NULL;
+       }
+
+       /* Start of the TR req records */
+       hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
+       /* Start address of the TR response array */
+       hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
+
+       tr_desc = hwdesc->cppi5_desc_vaddr;
+
+       if (uc->cyclic)
+               reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
+
+       if (dir == DMA_DEV_TO_MEM)
+               ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+       else
+               ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+       cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
+       cppi5_desc_set_pktids(tr_desc, uc->id,
+                             CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+       cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
+
+       return d;
+}
+
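+/*
+ * Build a TR mode slave_sg descriptor: each SG entry becomes one type1
+ * TR, where icnt0 is the burst size in bytes, icnt1 the number of
+ * bursts needed to cover the entry and dim1 the byte stride between
+ * consecutive bursts.
+ */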
+static struct udma_desc *
+udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
+                     unsigned int sglen, enum dma_transfer_direction dir,
+                     unsigned long tx_flags, void *context)
+{
+       enum dma_slave_buswidth dev_width;
+       struct scatterlist *sgent;
+       struct udma_desc *d;
+       size_t tr_size;
+       struct cppi5_tr_type1_t *tr_req = NULL;
+       unsigned int i;
+       u32 burst;
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_width = uc->cfg.src_addr_width;
+               burst = uc->cfg.src_maxburst;
+       } else if (dir == DMA_MEM_TO_DEV) {
+               dev_width = uc->cfg.dst_addr_width;
+               burst = uc->cfg.dst_maxburst;
+       } else {
+               dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       if (!burst)
+               burst = 1;
+
+       /* Now allocate and setup the descriptor. */
+       tr_size = sizeof(struct cppi5_tr_type1_t);
+       d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+       if (!d)
+               return NULL;
+
+       d->sglen = sglen;
+
+       tr_req = d->hwdesc[0].tr_req_base;
+       for_each_sg(sgl, sgent, sglen, i) {
+               d->residue += sg_dma_len(sgent);
+
+               cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+                             CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+               cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+               tr_req[i].addr = sg_dma_address(sgent);
+               tr_req[i].icnt0 = burst * dev_width;
+               tr_req[i].dim1 = burst * dev_width;
+               tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+       }
+
+       cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+
+       return d;
+}
+
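+/*
+ * Set up the static TR parameters used by PDMA endpoints: the element
+ * size (derived from the bus width), the element count and, when PDMA
+ * has to close the packets, the burst count per packet.
+ */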
+static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
+                                  enum dma_slave_buswidth dev_width,
+                                  u16 elcnt)
+{
+       if (uc->config.ep_type != PSIL_EP_PDMA_XY)
+               return 0;
+
+       /* Bus width translates to the element size (ES) */
+       switch (dev_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               d->static_tr.elsize = 0;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               d->static_tr.elsize = 1;
+               break;
+       case DMA_SLAVE_BUSWIDTH_3_BYTES:
+               d->static_tr.elsize = 2;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               d->static_tr.elsize = 3;
+               break;
+       case DMA_SLAVE_BUSWIDTH_8_BYTES:
+               d->static_tr.elsize = 4;
+               break;
+       default: /* not reached */
+               return -EINVAL;
+       }
+
+       d->static_tr.elcnt = elcnt;
+
+       /*
+        * PDMA must close the packet when the channel is in packet mode.
+        * For TR mode when the channel is not cyclic we also need PDMA to
+        * close the packet, otherwise the transfer will stall because PDMA
+        * holds on to the data it has received from the peripheral.
+        */
+       if (uc->config.pkt_mode || !uc->cyclic) {
+               unsigned int div = dev_width * elcnt;
+
+               if (uc->cyclic)
+                       d->static_tr.bstcnt = d->residue / d->sglen / div;
+               else
+                       d->static_tr.bstcnt = d->residue / div;
+
+               if (uc->config.dir == DMA_DEV_TO_MEM &&
+                   d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+                       return -EINVAL;
+       } else {
+               d->static_tr.bstcnt = 0;
+       }
+
+       return 0;
+}
+
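+/*
+ * Build a packet mode slave_sg descriptor: the first SG entry is
+ * described by a CPPI5 host descriptor carrying the packet information,
+ * the remaining entries are chained to it as host buffer descriptors.
+ */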
+static struct udma_desc *
+udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
+                      unsigned int sglen, enum dma_transfer_direction dir,
+                      unsigned long tx_flags, void *context)
+{
+       struct scatterlist *sgent;
+       struct cppi5_host_desc_t *h_desc = NULL;
+       struct udma_desc *d;
+       u32 ring_id;
+       unsigned int i;
+
+       d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+       if (!d)
+               return NULL;
+
+       d->sglen = sglen;
+       d->hwdesc_count = sglen;
+
+       if (dir == DMA_DEV_TO_MEM)
+               ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+       else
+               ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+       for_each_sg(sgl, sgent, sglen, i) {
+               struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+               dma_addr_t sg_addr = sg_dma_address(sgent);
+               struct cppi5_host_desc_t *desc;
+               size_t sg_len = sg_dma_len(sgent);
+
+               hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+                                               GFP_NOWAIT,
+                                               &hwdesc->cppi5_desc_paddr);
+               if (!hwdesc->cppi5_desc_vaddr) {
+                       dev_err(uc->ud->dev,
+                               "descriptor%d allocation failed\n", i);
+
+                       udma_free_hwdesc(uc, d);
+                       kfree(d);
+                       return NULL;
+               }
+
+               d->residue += sg_len;
+               hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+               desc = hwdesc->cppi5_desc_vaddr;
+
+               if (i == 0) {
+                       cppi5_hdesc_init(desc, 0, 0);
+                       /* Flow and Packet ID */
+                       cppi5_desc_set_pktids(&desc->hdr, uc->id,
+                                             CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+                       cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
+               } else {
+                       cppi5_hdesc_reset_hbdesc(desc);
+                       cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
+               }
+
+               /* attach the sg buffer to the descriptor */
+               cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
+
+               /* Attach link as host buffer descriptor */
+               if (h_desc)
+                       cppi5_hdesc_link_hbdesc(h_desc,
+                                               hwdesc->cppi5_desc_paddr);
+
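+               /* Descriptors are chained into one packet only for TX */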
+               if (dir == DMA_MEM_TO_DEV)
+                       h_desc = desc;
+       }
+
+       if (d->residue >= SZ_4M) {
+               dev_err(uc->ud->dev,
+                       "%s: Transfer size %u exceeds the supported 4M limit\n",
+                       __func__, d->residue);
+               udma_free_hwdesc(uc, d);
+               kfree(d);
+               return NULL;
+       }
+
+       h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+       cppi5_hdesc_set_pktlen(h_desc, d->residue);
+
+       return d;
+}
+
+static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
+                               void *data, size_t len)
+{
+       struct udma_desc *d = to_udma_desc(desc);
+       struct udma_chan *uc = to_udma_chan(desc->chan);
+       struct cppi5_host_desc_t *h_desc;
+       u32 psd_size = len;
+       u32 flags = 0;
+
+       if (!uc->config.pkt_mode || !uc->config.metadata_size)
+               return -ENOTSUPP;
+
+       if (!data || len > uc->config.metadata_size)
+               return -EINVAL;
+
+       if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+               return -EINVAL;
+
+       h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+       if (d->dir == DMA_MEM_TO_DEV)
+               memcpy(h_desc->epib, data, len);
+
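+       /* Metadata layout: optional EPIB first, then the PS data words */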
+       if (uc->config.needs_epib)
+               psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+       d->metadata = data;
+       d->metadata_size = len;
+       if (uc->config.needs_epib)
+               flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+
+       cppi5_hdesc_update_flags(h_desc, flags);
+       cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+       return 0;
+}
+
+static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+                                  size_t *payload_len, size_t *max_len)
+{
+       struct udma_desc *d = to_udma_desc(desc);
+       struct udma_chan *uc = to_udma_chan(desc->chan);
+       struct cppi5_host_desc_t *h_desc;
+
+       if (!uc->config.pkt_mode || !uc->config.metadata_size)
+               return ERR_PTR(-ENOTSUPP);
+
+       h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+       *max_len = uc->config.metadata_size;
+
+       *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
+                      CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
+       *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
+
+       return h_desc->epib;
+}
+
+static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
+                                size_t payload_len)
+{
+       struct udma_desc *d = to_udma_desc(desc);
+       struct udma_chan *uc = to_udma_chan(desc->chan);
+       struct cppi5_host_desc_t *h_desc;
+       u32 psd_size = payload_len;
+       u32 flags = 0;
+
+       if (!uc->config.pkt_mode || !uc->config.metadata_size)
+               return -ENOTSUPP;
+
+       if (payload_len > uc->config.metadata_size)
+               return -EINVAL;
+
+       if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+               return -EINVAL;
+
+       h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+       if (uc->config.needs_epib) {
+               psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+               flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+       }
+
+       cppi5_hdesc_update_flags(h_desc, flags);
+       cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+       return 0;
+}
+
+static struct dma_descriptor_metadata_ops metadata_ops = {
+       .attach = udma_attach_metadata,
+       .get_ptr = udma_get_metadata_ptr,
+       .set_len = udma_set_metadata_len,
+};
+
+static struct dma_async_tx_descriptor *
+udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+                  unsigned int sglen, enum dma_transfer_direction dir,
+                  unsigned long tx_flags, void *context)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       enum dma_slave_buswidth dev_width;
+       struct udma_desc *d;
+       u32 burst;
+
+       if (dir != uc->config.dir) {
+               dev_err(chan->device->dev,
+                       "%s: chan%d is for %s, it does not support %s\n",
+                       __func__, uc->id,
+                       dmaengine_get_direction_text(uc->config.dir),
+                       dmaengine_get_direction_text(dir));
+               return NULL;
+       }
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_width = uc->cfg.src_addr_width;
+               burst = uc->cfg.src_maxburst;
+       } else if (dir == DMA_MEM_TO_DEV) {
+               dev_width = uc->cfg.dst_addr_width;
+               burst = uc->cfg.dst_maxburst;
+       } else {
+               dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       if (!burst)
+               burst = 1;
+
+       if (uc->config.pkt_mode)
+               d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
+                                          context);
+       else
+               d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
+                                         context);
+
+       if (!d)
+               return NULL;
+
+       d->dir = dir;
+       d->desc_idx = 0;
+       d->tr_idx = 0;
+
+       /* static TR for remote PDMA */
+       if (udma_configure_statictr(uc, d, dev_width, burst)) {
+               dev_err(uc->ud->dev,
+                       "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+                       __func__, d->static_tr.bstcnt);
+
+               udma_free_hwdesc(uc, d);
+               kfree(d);
+               return NULL;
+       }
+
+       if (uc->config.metadata_size)
+               d->vd.tx.metadata_ops = &metadata_ops;
+
+       return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
+                       size_t buf_len, size_t period_len,
+                       enum dma_transfer_direction dir, unsigned long flags)
+{
+       enum dma_slave_buswidth dev_width;
+       struct udma_desc *d;
+       size_t tr_size;
+       struct cppi5_tr_type1_t *tr_req;
+       unsigned int i;
+       unsigned int periods = buf_len / period_len;
+       u32 burst;
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_width = uc->cfg.src_addr_width;
+               burst = uc->cfg.src_maxburst;
+       } else if (dir == DMA_MEM_TO_DEV) {
+               dev_width = uc->cfg.dst_addr_width;
+               burst = uc->cfg.dst_maxburst;
+       } else {
+               dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       if (!burst)
+               burst = 1;
+
+       /* Now allocate and setup the descriptor. */
+       tr_size = sizeof(struct cppi5_tr_type1_t);
+       d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
+       if (!d)
+               return NULL;
+
+       tr_req = d->hwdesc[0].tr_req_base;
+       for (i = 0; i < periods; i++) {
+               cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+                             CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
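+               /* One TR per period: icnt1 elements of dev_width bytes each */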
+               tr_req[i].addr = buf_addr + period_len * i;
+               tr_req[i].icnt0 = dev_width;
+               tr_req[i].icnt1 = period_len / dev_width;
+               tr_req[i].dim1 = dev_width;
+
+               if (!(flags & DMA_PREP_INTERRUPT))
+                       cppi5_tr_csf_set(&tr_req[i].flags,
+                                        CPPI5_TR_CSF_SUPR_EVT);
+       }
+
+       return d;
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
+                        size_t buf_len, size_t period_len,
+                        enum dma_transfer_direction dir, unsigned long flags)
+{
+       struct udma_desc *d;
+       u32 ring_id;
+       int i;
+       int periods = buf_len / period_len;
+
+       if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
+               return NULL;
+
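+       /* The host descriptor packet length field caps each period below 4M */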
+       if (period_len >= SZ_4M)
+               return NULL;
+
+       d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+       if (!d)
+               return NULL;
+
+       d->hwdesc_count = periods;
+
+       /* TODO: re-check this... */
+       if (dir == DMA_DEV_TO_MEM)
+               ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+       else
+               ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+       for (i = 0; i < periods; i++) {
+               struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+               dma_addr_t period_addr = buf_addr + (period_len * i);
+               struct cppi5_host_desc_t *h_desc;
+
+               hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+                                               GFP_NOWAIT,
+                                               &hwdesc->cppi5_desc_paddr);
+               if (!hwdesc->cppi5_desc_vaddr) {
+                       dev_err(uc->ud->dev,
+                               "descriptor %d allocation failed\n", i);
+
+                       udma_free_hwdesc(uc, d);
+                       kfree(d);
+                       return NULL;
+               }
+
+               hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+               h_desc = hwdesc->cppi5_desc_vaddr;
+
+               cppi5_hdesc_init(h_desc, 0, 0);
+               cppi5_hdesc_set_pktlen(h_desc, period_len);
+
+               /* Flow and Packet ID */
+               cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
+                                     CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+               cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
+
+               /* attach each period to a new descriptor */
+               cppi5_hdesc_attach_buf(h_desc,
+                                      period_addr, period_len,
+                                      period_addr, period_len);
+       }
+
+       return d;
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+                    size_t period_len, enum dma_transfer_direction dir,
+                    unsigned long flags)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       enum dma_slave_buswidth dev_width;
+       struct udma_desc *d;
+       u32 burst;
+
+       if (dir != uc->config.dir) {
+               dev_err(chan->device->dev,
+                       "%s: chan%d is for %s, it does not support %s\n",
+                       __func__, uc->id,
+                       dmaengine_get_direction_text(uc->config.dir),
+                       dmaengine_get_direction_text(dir));
+               return NULL;
+       }
+
+       uc->cyclic = true;
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_width = uc->cfg.src_addr_width;
+               burst = uc->cfg.src_maxburst;
+       } else if (dir == DMA_MEM_TO_DEV) {
+               dev_width = uc->cfg.dst_addr_width;
+               burst = uc->cfg.dst_maxburst;
+       } else {
+               dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       if (!burst)
+               burst = 1;
+
+       if (uc->config.pkt_mode)
+               d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
+                                            dir, flags);
+       else
+               d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
+                                           dir, flags);
+
+       if (!d)
+               return NULL;
+
+       d->sglen = buf_len / period_len;
+
+       d->dir = dir;
+       d->residue = buf_len;
+
+       /* static TR for remote PDMA */
+       if (udma_configure_statictr(uc, d, dev_width, burst)) {
+               dev_err(uc->ud->dev,
+                       "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+                       __func__, d->static_tr.bstcnt);
+
+               udma_free_hwdesc(uc, d);
+               kfree(d);
+               return NULL;
+       }
+
+       if (uc->config.metadata_size)
+               d->vd.tx.metadata_ops = &metadata_ops;
+
+       return vchan_tx_prep(&uc->vc, &d->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+                    size_t len, unsigned long tx_flags)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       struct udma_desc *d;
+       struct cppi5_tr_type15_t *tr_req;
+       int num_tr;
+       size_t tr_size = sizeof(struct cppi5_tr_type15_t);
+       u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+
+       if (uc->config.dir != DMA_MEM_TO_MEM) {
+               dev_err(chan->device->dev,
+                       "%s: chan%d is for %s, it does not support %s\n",
+                       __func__, uc->id,
+                       dmaengine_get_direction_text(uc->config.dir),
+                       dmaengine_get_direction_text(DMA_MEM_TO_MEM));
+               return NULL;
+       }
+
+       if (len < SZ_64K) {
+               num_tr = 1;
+               tr0_cnt0 = len;
+               tr0_cnt1 = 1;
+       } else {
+               unsigned long align_to = __ffs(src | dest);
+
+               if (align_to > 3)
+                       align_to = 3;
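+               /* Largest common power-of-2 alignment of src|dest, capped at 8 bytes */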
+               /*
+                * Keep it simple: tr0 moves blocks of (SZ_64K - alignment)
+                * bytes, tr1 moves the remainder
+                */
+               num_tr = 2;
+               tr0_cnt0 = (SZ_64K - BIT(align_to));
+               if (len / tr0_cnt0 >= SZ_64K) {
+                       dev_err(uc->ud->dev, "size %zu is not supported\n",
+                               len);
+                       return NULL;
+               }
+
+               tr0_cnt1 = len / tr0_cnt0;
+               tr1_cnt0 = len % tr0_cnt0;
+       }
+
+       d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
+       if (!d)
+               return NULL;
+
+       d->dir = DMA_MEM_TO_MEM;
+       d->desc_idx = 0;
+       d->tr_idx = 0;
+       d->residue = len;
+
+       tr_req = d->hwdesc[0].tr_req_base;
+
+       cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
+                     CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+       cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+       tr_req[0].addr = src;
+       tr_req[0].icnt0 = tr0_cnt0;
+       tr_req[0].icnt1 = tr0_cnt1;
+       tr_req[0].icnt2 = 1;
+       tr_req[0].icnt3 = 1;
+       tr_req[0].dim1 = tr0_cnt0;
+
+       tr_req[0].daddr = dest;
+       tr_req[0].dicnt0 = tr0_cnt0;
+       tr_req[0].dicnt1 = tr0_cnt1;
+       tr_req[0].dicnt2 = 1;
+       tr_req[0].dicnt3 = 1;
+       tr_req[0].ddim1 = tr0_cnt0;
+
+       if (num_tr == 2) {
+               cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
+                             CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+               cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+               tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
+               tr_req[1].icnt0 = tr1_cnt0;
+               tr_req[1].icnt1 = 1;
+               tr_req[1].icnt2 = 1;
+               tr_req[1].icnt3 = 1;
+
+               tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
+               tr_req[1].dicnt0 = tr1_cnt0;
+               tr_req[1].dicnt1 = 1;
+               tr_req[1].dicnt2 = 1;
+               tr_req[1].dicnt3 = 1;
+       }
+
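+       /* Signal end of packet on the last TR */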
+       cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
+
+       if (uc->config.metadata_size)
+               d->vd.tx.metadata_ops = &metadata_ops;
+
+       return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
+
+static void udma_issue_pending(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&uc->vc.lock, flags);
+
+       /* If we have something pending and no active descriptor, then */
+       if (vchan_issue_pending(&uc->vc) && !uc->desc) {
+               /*
+                * start a descriptor if the channel is NOT [marked as
+                * terminating _and_ it is still running (teardown has not
+                * completed yet)].
+                */
+               if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
+                     udma_is_chan_running(uc)))
+                       udma_start(uc);
+       }
+
+       spin_unlock_irqrestore(&uc->vc.lock, flags);
+}
+
+static enum dma_status udma_tx_status(struct dma_chan *chan,
+                                     dma_cookie_t cookie,
+                                     struct dma_tx_state *txstate)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       enum dma_status ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&uc->vc.lock, flags);
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+
+       if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
+               ret = DMA_PAUSED;
+
+       if (ret == DMA_COMPLETE || !txstate)
+               goto out;
+
+       if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
+               u32 peer_bcnt = 0;
+               u32 bcnt = 0;
+               u32 residue = uc->desc->residue;
+               u32 delay = 0;
+
+               if (uc->desc->dir == DMA_MEM_TO_DEV) {
+                       bcnt = udma_tchanrt_read(uc->tchan,
+                                                UDMA_TCHAN_RT_SBCNT_REG);
+
+                       if (uc->config.ep_type != PSIL_EP_NATIVE) {
+                               peer_bcnt = udma_tchanrt_read(uc->tchan,
+                                               UDMA_TCHAN_RT_PEER_BCNT_REG);
+
+                               if (bcnt > peer_bcnt)
+                                       delay = bcnt - peer_bcnt;
+                       }
+               } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
+                       bcnt = udma_rchanrt_read(uc->rchan,
+                                                UDMA_RCHAN_RT_BCNT_REG);
+
+                       if (uc->config.ep_type != PSIL_EP_NATIVE) {
+                               peer_bcnt = udma_rchanrt_read(uc->rchan,
+                                               UDMA_RCHAN_RT_PEER_BCNT_REG);
+
+                               if (peer_bcnt > bcnt)
+                                       delay = peer_bcnt - bcnt;
+                       }
+               } else {
+                       bcnt = udma_tchanrt_read(uc->tchan,
+                                                UDMA_TCHAN_RT_BCNT_REG);
+               }
+
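+               /*
+                * The RT byte counters accumulate across descriptors; drop
+                * what was counted before this one and wrap modulo the
+                * descriptor size to cope with cyclic transfers.
+                */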
+               bcnt -= uc->bcnt;
+               if (bcnt && !(bcnt % uc->desc->residue))
+                       residue = 0;
+               else
+                       residue -= bcnt % uc->desc->residue;
+
+               if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
+                       ret = DMA_COMPLETE;
+                       delay = 0;
+               }
+
+               dma_set_residue(txstate, residue);
+               dma_set_in_flight_bytes(txstate, delay);
+
+       } else {
+               ret = DMA_COMPLETE;
+       }
+
+out:
+       spin_unlock_irqrestore(&uc->vc.lock, flags);
+       return ret;
+}
+
+static int udma_pause(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+
+       if (!uc->desc)
+               return -EINVAL;
+
+       /* pause the channel */
+       switch (uc->desc->dir) {
+       case DMA_DEV_TO_MEM:
+               udma_rchanrt_update_bits(uc->rchan,
+                                        UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                                        UDMA_PEER_RT_EN_PAUSE,
+                                        UDMA_PEER_RT_EN_PAUSE);
+               break;
+       case DMA_MEM_TO_DEV:
+               udma_tchanrt_update_bits(uc->tchan,
+                                        UDMA_TCHAN_RT_PEER_RT_EN_REG,
+                                        UDMA_PEER_RT_EN_PAUSE,
+                                        UDMA_PEER_RT_EN_PAUSE);
+               break;
+       case DMA_MEM_TO_MEM:
+               udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                        UDMA_CHAN_RT_CTL_PAUSE,
+                                        UDMA_CHAN_RT_CTL_PAUSE);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int udma_resume(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+
+       if (!uc->desc)
+               return -EINVAL;
+
+       /* resume the channel */
+       switch (uc->desc->dir) {
+       case DMA_DEV_TO_MEM:
+               udma_rchanrt_update_bits(uc->rchan,
+                                        UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                                        UDMA_PEER_RT_EN_PAUSE, 0);
+
+               break;
+       case DMA_MEM_TO_DEV:
+               udma_tchanrt_update_bits(uc->tchan,
+                                        UDMA_TCHAN_RT_PEER_RT_EN_REG,
+                                        UDMA_PEER_RT_EN_PAUSE, 0);
+               break;
+       case DMA_MEM_TO_MEM:
+               udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                        UDMA_CHAN_RT_CTL_PAUSE, 0);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int udma_terminate_all(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&uc->vc.lock, flags);
+
+       if (udma_is_chan_running(uc))
+               udma_stop(uc);
+
+       if (uc->desc) {
+               uc->terminated_desc = uc->desc;
+               uc->desc = NULL;
+               uc->terminated_desc->terminated = true;
+               cancel_delayed_work(&uc->tx_drain.work);
+       }
+
+       uc->paused = false;
+
+       vchan_get_all_descriptors(&uc->vc, &head);
+       spin_unlock_irqrestore(&uc->vc.lock, flags);
+       vchan_dma_desc_free_list(&uc->vc, &head);
+
+       return 0;
+}
+
+static void udma_synchronize(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       unsigned long timeout = msecs_to_jiffies(1000);
+
+       vchan_synchronize(&uc->vc);
+
+       if (uc->state == UDMA_CHAN_IS_TERMINATING) {
+               timeout = wait_for_completion_timeout(&uc->teardown_completed,
+                                                     timeout);
+               if (!timeout) {
+                       dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
+                                uc->id);
+                       udma_dump_chan_stdata(uc);
+                       udma_reset_chan(uc, true);
+               }
+       }
+
+       udma_reset_chan(uc, false);
+       if (udma_is_chan_running(uc))
+               dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
+
+       cancel_delayed_work_sync(&uc->tx_drain.work);
+       udma_reset_rings(uc);
+}
+
+static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+                                  struct virt_dma_desc *vd,
+                                  struct dmaengine_result *result)
+{
+       struct udma_chan *uc = to_udma_chan(&vc->chan);
+       struct udma_desc *d;
+
+       if (!vd)
+               return;
+
+       d = to_udma_desc(&vd->tx);
+
+       if (d->metadata_size)
+               udma_fetch_epib(uc, d);
+
+       /* Provide residue information for the client */
+       if (result) {
+               void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
+
+               if (cppi5_desc_get_type(desc_vaddr) ==
+                   CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+                       result->residue = d->residue -
+                                         cppi5_hdesc_get_pktlen(desc_vaddr);
+                       if (result->residue)
+                               result->result = DMA_TRANS_ABORTED;
+                       else
+                               result->result = DMA_TRANS_NOERROR;
+               } else {
+                       result->residue = 0;
+                       result->result = DMA_TRANS_NOERROR;
+               }
+       }
+}
+
+/*
+ * This tasklet handles the completion of a DMA descriptor by
+ * calling its callback and freeing it.
+ */
+static void udma_vchan_complete(unsigned long arg)
+{
+       struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+       struct virt_dma_desc *vd, *_vd;
+       struct dmaengine_desc_callback cb;
+       LIST_HEAD(head);
+
+       spin_lock_irq(&vc->lock);
+       list_splice_tail_init(&vc->desc_completed, &head);
+       vd = vc->cyclic;
+       if (vd) {
+               vc->cyclic = NULL;
+               dmaengine_desc_get_callback(&vd->tx, &cb);
+       } else {
+               memset(&cb, 0, sizeof(cb));
+       }
+       spin_unlock_irq(&vc->lock);
+
+       udma_desc_pre_callback(vc, vd, NULL);
+       dmaengine_desc_callback_invoke(&cb, NULL);
+
+       list_for_each_entry_safe(vd, _vd, &head, node) {
+               struct dmaengine_result result;
+
+               dmaengine_desc_get_callback(&vd->tx, &cb);
+
+               list_del(&vd->node);
+
+               udma_desc_pre_callback(vc, vd, &result);
+               dmaengine_desc_callback_invoke(&cb, &result);
+
+               vchan_vdesc_fini(vd);
+       }
+}
+
+static void udma_free_chan_resources(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       struct udma_dev *ud = to_udma_dev(chan->device);
+
+       udma_terminate_all(chan);
+       if (uc->terminated_desc) {
+               udma_reset_chan(uc, false);
+               udma_reset_rings(uc);
+       }
+
+       cancel_delayed_work_sync(&uc->tx_drain.work);
+       destroy_delayed_work_on_stack(&uc->tx_drain.work);
+
+       if (uc->irq_num_ring > 0) {
+               free_irq(uc->irq_num_ring, uc);
+
+               uc->irq_num_ring = 0;
+       }
+       if (uc->irq_num_udma > 0) {
+               free_irq(uc->irq_num_udma, uc);
+
+               uc->irq_num_udma = 0;
+       }
+
+       /* Release PSI-L pairing */
+       if (uc->psil_paired) {
+               navss_psil_unpair(ud, uc->config.src_thread,
+                                 uc->config.dst_thread);
+               uc->psil_paired = false;
+       }
+
+       vchan_free_chan_resources(&uc->vc);
+       tasklet_kill(&uc->vc.task);
+
+       udma_free_tx_resources(uc);
+       udma_free_rx_resources(uc);
+       udma_reset_uchan(uc);
+
+       if (uc->use_dma_pool) {
+               dma_pool_destroy(uc->hdesc_pool);
+               uc->use_dma_pool = false;
+       }
+}
+
+static struct platform_driver udma_driver;
+
+static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+       struct udma_chan_config *ucc;
+       struct psil_endpoint_config *ep_config;
+       struct udma_chan *uc;
+       struct udma_dev *ud;
+       u32 *args;
+
+       if (chan->device->dev->driver != &udma_driver.driver)
+               return false;
+
+       uc = to_udma_chan(chan);
+       ucc = &uc->config;
+       ud = uc->ud;
+       args = param;
+
+       ucc->remote_thread_id = args[0];
+
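+       /* Destination (TX) threads have the offset bit set in their PSI-L ID */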
+       if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
+               ucc->dir = DMA_MEM_TO_DEV;
+       else
+               ucc->dir = DMA_DEV_TO_MEM;
+
+       ep_config = psil_get_ep_config(ucc->remote_thread_id);
+       if (IS_ERR(ep_config)) {
+               dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
+                       ucc->remote_thread_id);
+               ucc->dir = DMA_MEM_TO_MEM;
+               ucc->remote_thread_id = -1;
+               return false;
+       }
+
+       ucc->pkt_mode = ep_config->pkt_mode;
+       ucc->channel_tpl = ep_config->channel_tpl;
+       ucc->notdpkt = ep_config->notdpkt;
+       ucc->ep_type = ep_config->ep_type;
+
+       if (ucc->ep_type != PSIL_EP_NATIVE) {
+               const struct udma_match_data *match_data = ud->match_data;
+
+               if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
+                       ucc->enable_acc32 = ep_config->pdma_acc32;
+               if (match_data->flags & UDMA_FLAG_PDMA_BURST)
+                       ucc->enable_burst = ep_config->pdma_burst;
+       }
+
+       ucc->needs_epib = ep_config->needs_epib;
+       ucc->psd_size = ep_config->psd_size;
+       ucc->metadata_size =
+                       (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
+                       ucc->psd_size;
+
+       if (ucc->pkt_mode)
+               ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
+                                ucc->metadata_size, ud->desc_align);
+
+       dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
+               ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
+
+       return true;
+}
+
+static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
+                                     struct of_dma *ofdma)
+{
+       struct udma_dev *ud = ofdma->of_dma_data;
+       dma_cap_mask_t mask = ud->ddev.cap_mask;
+       struct dma_chan *chan;
+
+       if (dma_spec->args_count != 1)
+               return NULL;
+
+       chan = __dma_request_channel(&mask, udma_dma_filter_fn,
+                                    &dma_spec->args[0], ofdma->of_node);
+       if (!chan) {
+               dev_err(ud->dev, "failed to get channel in %s\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return chan;
+}
+
+static struct udma_match_data am654_main_data = {
+       .psil_base = 0x1000,
+       .enable_memcpy_support = true,
+       .statictr_z_mask = GENMASK(11, 0),
+       .rchan_oes_offset = 0x2000,
+       .tpl_levels = 2,
+       .level_start_idx = {
+               [0] = 8, /* Normal channels */
+               [1] = 0, /* High Throughput channels */
+       },
+};
+
+static struct udma_match_data am654_mcu_data = {
+       .psil_base = 0x6000,
+       .enable_memcpy_support = true, /* TEST: DMA domains */
+       .statictr_z_mask = GENMASK(11, 0),
+       .rchan_oes_offset = 0x2000,
+       .tpl_levels = 2,
+       .level_start_idx = {
+               [0] = 2, /* Normal channels */
+               [1] = 0, /* High Throughput channels */
+       },
+};
+
+static struct udma_match_data j721e_main_data = {
+       .psil_base = 0x1000,
+       .enable_memcpy_support = true,
+       .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+       .statictr_z_mask = GENMASK(23, 0),
+       .rchan_oes_offset = 0x400,
+       .tpl_levels = 3,
+       .level_start_idx = {
+               [0] = 16, /* Normal channels */
+               [1] = 4, /* High Throughput channels */
+               [2] = 0, /* Ultra High Throughput channels */
+       },
+};
+
+static struct udma_match_data j721e_mcu_data = {
+       .psil_base = 0x6000,
+       .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
+       .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+       .statictr_z_mask = GENMASK(23, 0),
+       .rchan_oes_offset = 0x400,
+       .tpl_levels = 2,
+       .level_start_idx = {
+               [0] = 2, /* Normal channels */
+               [1] = 0, /* High Throughput channels */
+       },
+};
+
+static const struct of_device_id udma_of_match[] = {
+       {
+               .compatible = "ti,am654-navss-main-udmap",
+               .data = &am654_main_data,
+       },
+       {
+               .compatible = "ti,am654-navss-mcu-udmap",
+               .data = &am654_mcu_data,
+       }, {
+               .compatible = "ti,j721e-navss-main-udmap",
+               .data = &j721e_main_data,
+       }, {
+               .compatible = "ti,j721e-navss-mcu-udmap",
+               .data = &j721e_mcu_data,
+       },
+       { /* Sentinel */ },
+};
+
+static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
+{
+       struct resource *res;
+       int i;
+
+       for (i = 0; i < MMR_LAST; i++) {
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                  mmr_names[i]);
+               ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(ud->mmrs[i]))
+                       return PTR_ERR(ud->mmrs[i]);
+       }
+
+       return 0;
+}
+
+static int udma_setup_resources(struct udma_dev *ud)
+{
+       struct device *dev = ud->dev;
+       int ch_count, ret, i, j;
+       u32 cap2, cap3;
+       struct ti_sci_resource_desc *rm_desc;
+       struct ti_sci_resource *rm_res, irq_res;
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+       static const char * const range_names[] = { "ti,sci-rm-range-tchan",
+                                                   "ti,sci-rm-range-rchan",
+                                                   "ti,sci-rm-range-rflow" };
+
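+       /* Resource counts live in the CAP2/CAP3 registers of the GCFG region */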
+       cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
+       cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+
+       ud->rflow_cnt = cap3 & 0x3fff;
+       ud->tchan_cnt = cap2 & 0x1ff;
+       ud->echan_cnt = (cap2 >> 9) & 0x1ff;
+       ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
+       ch_count  = ud->tchan_cnt + ud->rchan_cnt;
+
+       ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+                                          sizeof(unsigned long), GFP_KERNEL);
+       ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+                                 GFP_KERNEL);
+       ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+                                          sizeof(unsigned long), GFP_KERNEL);
+       ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+                                 GFP_KERNEL);
+       ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
+                                             sizeof(unsigned long),
+                                             GFP_KERNEL);
+       ud->rflow_gp_map_allocated = devm_kcalloc(dev,
+                                                 BITS_TO_LONGS(ud->rflow_cnt),
+                                                 sizeof(unsigned long),
+                                                 GFP_KERNEL);
+       ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+                                       sizeof(unsigned long),
+                                       GFP_KERNEL);
+       ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
+                                 GFP_KERNEL);
+
+       if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
+           !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
+           !ud->rflows || !ud->rflow_in_use)
+               return -ENOMEM;
+
+       /*
+        * RX flows with the same IDs as RX channels are reserved for use as
+        * default flows if the remote HW can't generate flow_ids. Those RX
+        * flows can only be requested explicitly, by ID.
+        */
+       bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
+
+       /* by default no GP rflows are assigned to Linux */
+       bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
+
+       /* Get resource ranges from tisci */
+       for (i = 0; i < RM_RANGE_LAST; i++)
+               tisci_rm->rm_ranges[i] =
+                       devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+                                                   tisci_rm->tisci_dev_id,
+                                                   (char *)range_names[i]);
+
+       /* tchan ranges */
+       rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+       if (IS_ERR(rm_res)) {
+               bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+       } else {
+               bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+               for (i = 0; i < rm_res->sets; i++) {
+                       rm_desc = &rm_res->desc[i];
+                       bitmap_clear(ud->tchan_map, rm_desc->start,
+                                    rm_desc->num);
+                       dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
+                               rm_desc->start, rm_desc->num);
+               }
+       }
+       irq_res.sets = rm_res->sets;
+
+       /* rchan and matching default flow ranges */
+       rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+       if (IS_ERR(rm_res)) {
+               bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+       } else {
+               bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+               for (i = 0; i < rm_res->sets; i++) {
+                       rm_desc = &rm_res->desc[i];
+                       bitmap_clear(ud->rchan_map, rm_desc->start,
+                                    rm_desc->num);
+                       dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
+                               rm_desc->start, rm_desc->num);
+               }
+       }
+
+       irq_res.sets += rm_res->sets;
+       irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+       rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+       for (i = 0; i < rm_res->sets; i++) {
+               irq_res.desc[i].start = rm_res->desc[i].start;
+               irq_res.desc[i].num = rm_res->desc[i].num;
+       }
+       rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+       for (j = 0; j < rm_res->sets; j++, i++) {
+               irq_res.desc[i].start = rm_res->desc[j].start +
+                                       ud->match_data->rchan_oes_offset;
+               irq_res.desc[i].num = rm_res->desc[j].num;
+       }
+       ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+       kfree(irq_res.desc);
+       if (ret) {
+               dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+               return ret;
+       }
+
+       /* GP rflow ranges */
+       rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+       if (IS_ERR(rm_res)) {
+               /* all gp flows are assigned exclusively to Linux */
+               bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
+                            ud->rflow_cnt - ud->rchan_cnt);
+       } else {
+               for (i = 0; i < rm_res->sets; i++) {
+                       rm_desc = &rm_res->desc[i];
+                       bitmap_clear(ud->rflow_gp_map, rm_desc->start,
+                                    rm_desc->num);
+                       dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
+                               rm_desc->start, rm_desc->num);
+               }
+       }
+
+       ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
+       ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
+       if (!ch_count)
+               return -ENODEV;
+
+       ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
+                                   GFP_KERNEL);
+       if (!ud->channels)
+               return -ENOMEM;
+
+       dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+                ch_count,
+                ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
+                ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
+                ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
+                                              ud->rflow_cnt));
+
+       return ch_count;
+}
+
+#define TI_UDMAC_BUSWIDTHS     (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int udma_probe(struct platform_device *pdev)
+{
+       struct device_node *navss_node = pdev->dev.parent->of_node;
+       struct device *dev = &pdev->dev;
+       struct udma_dev *ud;
+       const struct of_device_id *match;
+       int i, ret;
+       int ch_count;
+
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
+       if (ret)
+               dev_err(dev, "failed to set DMA mask\n");
+
+       ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
+       if (!ud)
+               return -ENOMEM;
+
+       ret = udma_get_mmrs(pdev, ud);
+       if (ret)
+               return ret;
+
+       ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
+       if (IS_ERR(ud->tisci_rm.tisci))
+               return PTR_ERR(ud->tisci_rm.tisci);
+
+       ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
+                                  &ud->tisci_rm.tisci_dev_id);
+       if (ret) {
+               dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
+               return ret;
+       }
+       pdev->id = ud->tisci_rm.tisci_dev_id;
+
+       ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
+                                  &ud->tisci_rm.tisci_navss_dev_id);
+       if (ret) {
+               dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
+               return ret;
+       }
+
+       ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
+       ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
+
+       ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+       if (IS_ERR(ud->ringacc))
+               return PTR_ERR(ud->ringacc);
+
+       dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+                                           DOMAIN_BUS_TI_SCI_INTA_MSI);
+       if (!dev->msi_domain) {
+               dev_err(dev, "Failed to get MSI domain\n");
+               return -EPROBE_DEFER;
+       }
+
+       match = of_match_node(udma_of_match, dev->of_node);
+       if (!match) {
+               dev_err(dev, "No compatible match found\n");
+               return -ENODEV;
+       }
+       ud->match_data = match->data;
+
+       dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
+       dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+
+       ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
+       ud->ddev.device_config = udma_slave_config;
+       ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
+       ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
+       ud->ddev.device_issue_pending = udma_issue_pending;
+       ud->ddev.device_tx_status = udma_tx_status;
+       ud->ddev.device_pause = udma_pause;
+       ud->ddev.device_resume = udma_resume;
+       ud->ddev.device_terminate_all = udma_terminate_all;
+       ud->ddev.device_synchronize = udma_synchronize;
+
+       ud->ddev.device_free_chan_resources = udma_free_chan_resources;
+       ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
+       ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
+       ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
+       ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
+                                      DESC_METADATA_ENGINE;
+       if (ud->match_data->enable_memcpy_support) {
+               dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
+               ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
+               ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
+       }
+
+       ud->ddev.dev = dev;
+       ud->dev = dev;
+       ud->psil_base = ud->match_data->psil_base;
+
+       INIT_LIST_HEAD(&ud->ddev.channels);
+       INIT_LIST_HEAD(&ud->desc_to_purge);
+
+       ch_count = udma_setup_resources(ud);
+       if (ch_count <= 0)
+               return ch_count;
+
+       spin_lock_init(&ud->lock);
+       INIT_WORK(&ud->purge_work, udma_purge_desc_work);
+
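+       /* Descriptors need 64 byte alignment, or the cache line size if larger */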
+       ud->desc_align = 64;
+       if (ud->desc_align < dma_get_cache_alignment())
+               ud->desc_align = dma_get_cache_alignment();
+
+       for (i = 0; i < ud->tchan_cnt; i++) {
+               struct udma_tchan *tchan = &ud->tchans[i];
+
+               tchan->id = i;
+               tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
+       }
+
+       for (i = 0; i < ud->rchan_cnt; i++) {
+               struct udma_rchan *rchan = &ud->rchans[i];
+
+               rchan->id = i;
+               rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
+       }
+
+       for (i = 0; i < ud->rflow_cnt; i++) {
+               struct udma_rflow *rflow = &ud->rflows[i];
+
+               rflow->id = i;
+       }
+
+       for (i = 0; i < ch_count; i++) {
+               struct udma_chan *uc = &ud->channels[i];
+
+               uc->ud = ud;
+               uc->vc.desc_free = udma_desc_free;
+               uc->id = i;
+               uc->tchan = NULL;
+               uc->rchan = NULL;
+               uc->config.remote_thread_id = -1;
+               uc->config.dir = DMA_MEM_TO_MEM;
+               uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
+                                         dev_name(dev), i);
+
+               vchan_init(&uc->vc, &ud->ddev);
+               /* Use custom vchan completion handling */
+               tasklet_init(&uc->vc.task, udma_vchan_complete,
+                            (unsigned long)&uc->vc);
+               init_completion(&uc->teardown_completed);
+       }
+
+       ret = dma_async_device_register(&ud->ddev);
+       if (ret) {
+               dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, ud);
+
+       ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
+       if (ret) {
+               dev_err(dev, "failed to register of_dma controller\n");
+               dma_async_device_unregister(&ud->ddev);
+       }
+
+       return ret;
+}
+
+static struct platform_driver udma_driver = {
+       .driver = {
+               .name   = "ti-udma",
+               .of_match_table = udma_of_match,
+               .suppress_bind_attrs = true,
+       },
+       .probe          = udma_probe,
+};
+builtin_platform_driver(udma_driver);
+
+/* Private interfaces to UDMA */
+#include "k3-udma-private.c"
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
new file mode 100644
index 0000000..128d874
--- /dev/null
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_H_
+#define K3_UDMA_H_
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/* Global registers */
+#define UDMA_REV_REG                   0x0
+#define UDMA_PERF_CTL_REG              0x4
+#define UDMA_EMU_CTL_REG               0x8
+#define UDMA_PSIL_TO_REG               0x10
+#define UDMA_UTC_CTL_REG               0x1c
+#define UDMA_CAP_REG(i)                        (0x20 + ((i) * 4))
+#define UDMA_RX_FLOW_ID_FW_OES_REG     0x80
+#define UDMA_RX_FLOW_ID_FW_STATUS_REG  0x88
+
+/* TX chan RT regs */
+#define UDMA_TCHAN_RT_CTL_REG          0x0
+#define UDMA_TCHAN_RT_SWTRIG_REG       0x8
+#define UDMA_TCHAN_RT_STDATA_REG       0x80
+
+#define UDMA_TCHAN_RT_PEER_REG(i)      (0x200 + ((i) * 0x4))
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG    \
+       UDMA_TCHAN_RT_PEER_REG(0)       /* PSI-L: 0x400 */
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG     \
+       UDMA_TCHAN_RT_PEER_REG(1)       /* PSI-L: 0x401 */
+#define UDMA_TCHAN_RT_PEER_BCNT_REG            \
+       UDMA_TCHAN_RT_PEER_REG(4)       /* PSI-L: 0x404 */
+#define UDMA_TCHAN_RT_PEER_RT_EN_REG           \
+       UDMA_TCHAN_RT_PEER_REG(8)       /* PSI-L: 0x408 */
+
+#define UDMA_TCHAN_RT_PCNT_REG         0x400
+#define UDMA_TCHAN_RT_BCNT_REG         0x408
+#define UDMA_TCHAN_RT_SBCNT_REG                0x410
+
+/* RX chan RT regs */
+#define UDMA_RCHAN_RT_CTL_REG          0x0
+#define UDMA_RCHAN_RT_SWTRIG_REG       0x8
+#define UDMA_RCHAN_RT_STDATA_REG       0x80
+
+#define UDMA_RCHAN_RT_PEER_REG(i)      (0x200 + ((i) * 0x4))
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG    \
+       UDMA_RCHAN_RT_PEER_REG(0)       /* PSI-L: 0x400 */
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG     \
+       UDMA_RCHAN_RT_PEER_REG(1)       /* PSI-L: 0x401 */
+#define UDMA_RCHAN_RT_PEER_BCNT_REG            \
+       UDMA_RCHAN_RT_PEER_REG(4)       /* PSI-L: 0x404 */
+#define UDMA_RCHAN_RT_PEER_RT_EN_REG           \
+       UDMA_RCHAN_RT_PEER_REG(8)       /* PSI-L: 0x408 */
+
+#define UDMA_RCHAN_RT_PCNT_REG         0x400
+#define UDMA_RCHAN_RT_BCNT_REG         0x408
+#define UDMA_RCHAN_RT_SBCNT_REG                0x410
+
+/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */
+#define UDMA_CHAN_RT_CTL_EN            BIT(31)
+#define UDMA_CHAN_RT_CTL_TDOWN         BIT(30)
+#define UDMA_CHAN_RT_CTL_PAUSE         BIT(29)
+#define UDMA_CHAN_RT_CTL_FTDOWN                BIT(28)
+#define UDMA_CHAN_RT_CTL_ERROR         BIT(0)
+
+/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */
+#define UDMA_PEER_RT_EN_ENABLE         BIT(31)
+#define UDMA_PEER_RT_EN_TEARDOWN       BIT(30)
+#define UDMA_PEER_RT_EN_PAUSE          BIT(29)
+#define UDMA_PEER_RT_EN_FLUSH          BIT(28)
+#define UDMA_PEER_RT_EN_IDLE           BIT(1)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG
+ */
+#define PDMA_STATIC_TR_X_MASK          GENMASK(26, 24)
+#define PDMA_STATIC_TR_X_SHIFT         (24)
+#define PDMA_STATIC_TR_Y_MASK          GENMASK(11, 0)
+#define PDMA_STATIC_TR_Y_SHIFT         (0)
+
+#define PDMA_STATIC_TR_Y(x)    \
+       (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK)
+#define PDMA_STATIC_TR_X(x)    \
+       (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK)
+
+#define PDMA_STATIC_TR_XY_ACC32                BIT(30)
+#define PDMA_STATIC_TR_XY_BURST                BIT(31)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG
+ */
+#define PDMA_STATIC_TR_Z(x, mask)      ((x) & (mask))
+
+struct udma_dev;
+struct udma_tchan;
+struct udma_rchan;
+struct udma_rflow;
+
+enum udma_rm_range {
+       RM_RANGE_TCHAN = 0,
+       RM_RANGE_RCHAN,
+       RM_RANGE_RFLOW,
+       RM_RANGE_LAST,
+};
+
+struct udma_tisci_rm {
+       const struct ti_sci_handle *tisci;
+       const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
+       u32  tisci_dev_id;
+
+       /* tisci information for PSI-L thread pairing/unpairing */
+       const struct ti_sci_rm_psil_ops *tisci_psil_ops;
+       u32  tisci_navss_dev_id;
+
+       struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
+};
+
+/* Direct access to UDMA low-level resources for the glue layer */
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread);
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+                           u32 dst_thread);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+void xudma_dev_put(struct udma_dev *ud);
+u32 xudma_dev_get_psil_base(struct udma_dev *ud);
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+
+struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id);
+struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id);
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id);
+
+void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p);
+void xudma_rchan_put(struct udma_dev *ud, struct udma_rchan *p);
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p);
+
+int xudma_tchan_get_id(struct udma_tchan *p);
+int xudma_rchan_get_id(struct udma_rchan *p);
+int xudma_rflow_get_id(struct udma_rflow *p);
+
+u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg);
+void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val);
+u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg);
+void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
+
+#endif /* K3_UDMA_H_ */
index 256fc66..23e33a8 100644
@@ -114,13 +114,8 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
        struct virt_dma_desc *vd, *_vd;
 
        list_for_each_entry_safe(vd, _vd, head, node) {
-               if (dmaengine_desc_test_reuse(&vd->tx)) {
-                       list_move_tail(&vd->node, &vc->desc_allocated);
-               } else {
-                       dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-                       list_del(&vd->node);
-                       vc->desc_free(vd);
-               }
+               list_del(&vd->node);
+               vchan_vdesc_fini(vd);
        }
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -134,6 +129,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
        INIT_LIST_HEAD(&vc->desc_submitted);
        INIT_LIST_HEAD(&vc->desc_issued);
        INIT_LIST_HEAD(&vc->desc_completed);
+       INIT_LIST_HEAD(&vc->desc_terminated);
 
        tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
 
index ab158ba..e9f5250 100644
@@ -31,9 +31,9 @@ struct virt_dma_chan {
        struct list_head desc_submitted;
        struct list_head desc_issued;
        struct list_head desc_completed;
+       struct list_head desc_terminated;
 
        struct virt_dma_desc *cyclic;
-       struct virt_dma_desc *vd_terminated;
 };
 
 static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -113,10 +113,15 @@ static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
 {
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
 
-       if (dmaengine_desc_test_reuse(&vd->tx))
+       if (dmaengine_desc_test_reuse(&vd->tx)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&vc->lock, flags);
                list_add(&vd->node, &vc->desc_allocated);
-       else
+               spin_unlock_irqrestore(&vc->lock, flags);
+       } else {
                vc->desc_free(vd);
+       }
 }
 
 /**
@@ -141,11 +146,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
 {
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
 
-       /* free up stuck descriptor */
-       if (vc->vd_terminated)
-               vchan_vdesc_fini(vc->vd_terminated);
+       list_add_tail(&vd->node, &vc->desc_terminated);
 
-       vc->vd_terminated = vd;
        if (vc->cyclic == vd)
                vc->cyclic = NULL;
 }
@@ -179,6 +181,7 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
        list_splice_tail_init(&vc->desc_submitted, head);
        list_splice_tail_init(&vc->desc_issued, head);
        list_splice_tail_init(&vc->desc_completed, head);
+       list_splice_tail_init(&vc->desc_terminated, head);
 }
 
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
@@ -207,16 +210,18 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
  */
 static inline void vchan_synchronize(struct virt_dma_chan *vc)
 {
+       LIST_HEAD(head);
        unsigned long flags;
 
        tasklet_kill(&vc->task);
 
        spin_lock_irqsave(&vc->lock, flags);
-       if (vc->vd_terminated) {
-               vchan_vdesc_fini(vc->vd_terminated);
-               vc->vd_terminated = NULL;
-       }
+
+       list_splice_tail_init(&vc->desc_terminated, &head);
+
        spin_unlock_irqrestore(&vc->lock, flags);
+
+       vchan_dma_desc_free_list(vc, &head);
 }
 
 #endif
index 9c845c0..d47749a 100644
 /* Max transfer size per descriptor */
 #define ZYNQMP_DMA_MAX_TRANS_LEN       0x40000000
 
+/* Max burst lengths */
+#define ZYNQMP_DMA_MAX_DST_BURST_LEN    32768U
+#define ZYNQMP_DMA_MAX_SRC_BURST_LEN    32768U
+
 /* Reset values for data attributes */
 #define ZYNQMP_DMA_AXCACHE_VAL         0xF
-#define ZYNQMP_DMA_ARLEN_RST_VAL       0xF
-#define ZYNQMP_DMA_AWLEN_RST_VAL       0xF
 
 #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL   0x1F
 
@@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
 
 static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
 {
-       u32 val;
+       u32 val, burst_val;
 
        val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
        val |= ZYNQMP_DMA_POINT_TYPE_SG;
        writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
 
        val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+       burst_val = __ilog2_u32(chan->src_burst_len);
        val = (val & ~ZYNQMP_DMA_ARLEN) |
-               (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+               ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
+       burst_val = __ilog2_u32(chan->dst_burst_len);
        val = (val & ~ZYNQMP_DMA_AWLEN) |
-               (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+               ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
        writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
 }
 
@@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
 {
        struct zynqmp_dma_chan *chan = to_chan(dchan);
 
-       chan->src_burst_len = config->src_maxburst;
-       chan->dst_burst_len = config->dst_maxburst;
+       chan->src_burst_len = clamp(config->src_maxburst, 1U,
+               ZYNQMP_DMA_MAX_SRC_BURST_LEN);
+       chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
+               ZYNQMP_DMA_MAX_DST_BURST_LEN);
 
        return 0;
 }
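
In this driver the ARLEN/AWLEN fields hold a power-of-two exponent rather than
a raw beat count, so the requested maxburst is first clamped to the supported
1..32768 range and then converted with __ilog2_u32() before being masked into
the register. A sketch of the conversion, using the defines above:

        u32 burst = clamp(config->src_maxburst, 1U, ZYNQMP_DMA_MAX_SRC_BURST_LEN);
        u32 field = __ilog2_u32(burst);   /* e.g. burst 16 -> field 4 */

        val = (val & ~ZYNQMP_DMA_ARLEN) |
              ((field << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);

Masking with ZYNQMP_DMA_ARLEN after the shift ensures an out-of-range value
can never clobber neighbouring bits, which the old unvalidated shift allowed.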
@@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
                return PTR_ERR(chan->regs);
 
        chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
-       chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
-       chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+       chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
+       chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
        err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
        if (err < 0) {
                dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
index 5c82723..b3c99bb 100644 (file)
@@ -491,8 +491,7 @@ config EDAC_TI
        tristate "Texas Instruments DDR3 ECC Controller"
        depends on ARCH_KEYSTONE || SOC_DRA7XX
        help
-         Support for error detection and correction on the
-          TI SoCs.
+         Support for error detection and correction on the TI SoCs.
 
 config EDAC_QCOM
        tristate "QCOM EDAC Controller"
index 428ce98..9fbad90 100644 (file)
@@ -214,7 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
 
        scrubval = scrubrates[i].scrubval;
 
-       if (pvt->fam == 0x17 || pvt->fam == 0x18) {
+       if (pvt->umc) {
                __f17h_set_scrubval(pvt, scrubval);
        } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
                f15h_select_dct(pvt, 0);
@@ -256,18 +256,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
        int i, retval = -EINVAL;
        u32 scrubval = 0;
 
-       switch (pvt->fam) {
-       case 0x15:
-               /* Erratum #505 */
-               if (pvt->model < 0x10)
-                       f15h_select_dct(pvt, 0);
-
-               if (pvt->model == 0x60)
-                       amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
-               break;
-
-       case 0x17:
-       case 0x18:
+       if (pvt->umc) {
                amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
                if (scrubval & BIT(0)) {
                        amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
@@ -276,11 +265,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
                } else {
                        scrubval = 0;
                }
-               break;
+       } else if (pvt->fam == 0x15) {
+               /* Erratum #505 */
+               if (pvt->model < 0x10)
+                       f15h_select_dct(pvt, 0);
 
-       default:
+               if (pvt->model == 0x60)
+                       amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+       } else {
                amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
-               break;
        }
 
        scrubval = scrubval & 0x001F;
@@ -1055,6 +1048,16 @@ static void determine_memory_type(struct amd64_pvt *pvt)
 {
        u32 dram_ctrl, dcsm;
 
+       if (pvt->umc) {
+               if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
+                       pvt->dram_type = MEM_LRDDR4;
+               else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
+                       pvt->dram_type = MEM_RDDR4;
+               else
+                       pvt->dram_type = MEM_DDR4;
+               return;
+       }
+
        switch (pvt->fam) {
        case 0xf:
                if (pvt->ext_model >= K8_REV_F)
@@ -1100,16 +1103,6 @@ static void determine_memory_type(struct amd64_pvt *pvt)
        case 0x16:
                goto ddr3;
 
-       case 0x17:
-       case 0x18:
-               if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
-                       pvt->dram_type = MEM_LRDDR4;
-               else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
-                       pvt->dram_type = MEM_RDDR4;
-               else
-                       pvt->dram_type = MEM_DDR4;
-               return;
-
        default:
                WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
                pvt->dram_type = MEM_EMPTY;
@@ -2336,6 +2329,16 @@ static struct amd64_family_type family_types[] = {
                        .dbam_to_cs             = f17_addr_mask_to_cs_size,
                }
        },
+       [F19_CPUS] = {
+               .ctl_name = "F19h",
+               .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
+               .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
+               .max_mcs = 8,
+               .ops = {
+                       .early_channel_count    = f17_early_channel_count,
+                       .dbam_to_cs             = f17_addr_mask_to_cs_size,
+               }
+       },
 };
 
 /*
@@ -3368,6 +3371,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
                        family_types[F17_CPUS].ctl_name = "F18h";
                break;
 
+       case 0x19:
+               fam_type        = &family_types[F19_CPUS];
+               pvt->ops        = &family_types[F19_CPUS].ops;
+               family_types[F19_CPUS].ctl_name = "F19h";
+               break;
+
        default:
                amd64_err("Unsupported family!\n");
                return NULL;
@@ -3573,9 +3582,6 @@ static void remove_one_instance(unsigned int nid)
        struct mem_ctl_info *mci;
        struct amd64_pvt *pvt;
 
-       mci = find_mci_by_dev(&F3->dev);
-       WARN_ON(!mci);
-
        /* Remove from EDAC CORE tracking list */
        mci = edac_mc_del_mc(&F3->dev);
        if (!mci)
@@ -3626,6 +3632,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
        { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY,  X86_FEATURE_ANY, 0 },
        { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY,  X86_FEATURE_ANY, 0 },
        { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+       { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY,  X86_FEATURE_ANY, 0 },
        { }
 };
 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
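
The new table entry is what makes the module load and bind on family 0x19
parts; the same table serves MODULE_DEVICE_TABLE() for modprobe matching and
the runtime CPU check. A hedged sketch of how such a table is typically
consumed (the init-path shape is illustrative, not quoted from this patch):

        static int __init amd64_edac_init(void)
        {
                if (!x86_match_cpu(amd64_cpuids))  /* no entry matches this CPU */
                        return -ENODEV;
                /* ... */
        }

x86_match_cpu() returns the first matching entry or NULL, so on the
ID-matching side one added line is the whole enablement.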
index 9be3168..abbf3c2 100644 (file)
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
+#define PCI_DEVICE_ID_AMD_19H_DF_F0    0x1650
+#define PCI_DEVICE_ID_AMD_19H_DF_F6    0x1656
 
 /*
  * Function 1 - Address Map
@@ -292,6 +294,7 @@ enum amd_families {
        F17_M10H_CPUS,
        F17_M30H_CPUS,
        F17_M70H_CPUS,
+       F19_CPUS,
        NUM_FAMILIES,
 };
 
index 09a9e3d..b194658 100644 (file)
@@ -243,7 +243,7 @@ static int init_csrows(struct mem_ctl_info *mci)
        if (!np) {
                dev_err(mci->pdev, "dt: missing /memory node\n");
                return -ENODEV;
-       };
+       }
 
        rc = of_address_to_resource(np, 0, &r);
 
@@ -252,7 +252,7 @@ static int init_csrows(struct mem_ctl_info *mci)
        if (rc) {
                dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n");
                return rc;
-       };
+       }
 
        dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
                r.start, resource_size(&r), PAGE_SHIFT);
index 0ddc41e..191aa7c 100644 (file)
@@ -259,11 +259,6 @@ static inline u32 i5100_nrecmemb_ras(u32 a)
        return a & ((1 << 16) - 1);
 }
 
-static inline u32 i5100_redmemb_ecc_locator(u32 a)
-{
-       return a & ((1 << 18) - 1);
-}
-
 static inline u32 i5100_recmema_merr(u32 a)
 {
        return i5100_nrecmema_merr(a);
@@ -486,7 +481,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
        u32 dw;
        u32 dw2;
        unsigned syndrome = 0;
-       unsigned ecc_loc = 0;
        unsigned merr;
        unsigned bank;
        unsigned rank;
@@ -499,7 +493,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
                pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
                syndrome = dw2;
                pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
-               ecc_loc = i5100_redmemb_ecc_locator(dw2);
        }
 
        if (i5100_validlog_recmemvalid(dw)) {
index ea622c6..ea980c5 100644 (file)
@@ -6,7 +6,7 @@
 
 #include "mce_amd.h"
 
-static struct amd_decoder_ops *fam_ops;
+static struct amd_decoder_ops fam_ops;
 
 static u8 xec_mask      = 0xf;
 
@@ -175,6 +175,33 @@ static const char * const smca_ls_mce_desc[] = {
        "L2 Fill Data error",
 };
 
+static const char * const smca_ls2_mce_desc[] = {
+       "An ECC error was detected on a data cache read by a probe or victimization",
+       "An ECC error or L2 poison was detected on a data cache read by a load",
+       "An ECC error was detected on a data cache read-modify-write by a store",
+       "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization",
+       "An ECC error or poison bit mismatch was detected on a tag read by a load",
+       "An ECC error or poison bit mismatch was detected on a tag read by a store",
+       "An ECC error was detected on an EMEM read by a load",
+       "An ECC error was detected on an EMEM read-modify-write by a store",
+       "A parity error was detected in an L1 TLB entry by any access",
+       "A parity error was detected in an L2 TLB entry by any access",
+       "A parity error was detected in a PWC entry by any access",
+       "A parity error was detected in an STQ entry by any access",
+       "A parity error was detected in an LDQ entry by any access",
+       "A parity error was detected in a MAB entry by any access",
+       "A parity error was detected in an SCB entry state field by any access",
+       "A parity error was detected in an SCB entry address field by any access",
+       "A parity error was detected in an SCB entry data field by any access",
+       "A parity error was detected in a WCB entry by any access",
+       "A poisoned line was detected in an SCB entry by any access",
+       "A SystemReadDataError error was reported on read data returned from L2 for a load",
+       "A SystemReadDataError error was reported on read data returned from L2 for an SCB store",
+       "A SystemReadDataError error was reported on read data returned from L2 for a WCB store",
+       "A hardware assertion error was reported",
+       "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access",
+};
+
 static const char * const smca_if_mce_desc[] = {
        "Op Cache Microtag Probe Port Parity Error",
        "IC Microtag or Full Tag Multi-hit Error",
@@ -378,6 +405,7 @@ struct smca_mce_desc {
 
 static struct smca_mce_desc smca_mce_descs[] = {
        [SMCA_LS]       = { smca_ls_mce_desc,   ARRAY_SIZE(smca_ls_mce_desc)    },
+       [SMCA_LS_V2]    = { smca_ls2_mce_desc,  ARRAY_SIZE(smca_ls2_mce_desc)   },
        [SMCA_IF]       = { smca_if_mce_desc,   ARRAY_SIZE(smca_if_mce_desc)    },
        [SMCA_L2_CACHE] = { smca_l2_mce_desc,   ARRAY_SIZE(smca_l2_mce_desc)    },
        [SMCA_DE]       = { smca_de_mce_desc,   ARRAY_SIZE(smca_de_mce_desc)    },
@@ -555,7 +583,7 @@ static void decode_mc0_mce(struct mce *m)
                                            : (xec ? "multimatch" : "parity")));
                        return;
                }
-       } else if (fam_ops->mc0_mce(ec, xec))
+       } else if (fam_ops.mc0_mce(ec, xec))
                ;
        else
                pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n");
@@ -669,7 +697,7 @@ static void decode_mc1_mce(struct mce *m)
                        pr_cont("Hardware Assert.\n");
                else
                        goto wrong_mc1_mce;
-       } else if (fam_ops->mc1_mce(ec, xec))
+       } else if (fam_ops.mc1_mce(ec, xec))
                ;
        else
                goto wrong_mc1_mce;
@@ -803,7 +831,7 @@ static void decode_mc2_mce(struct mce *m)
 
        pr_emerg(HW_ERR "MC2 Error: ");
 
-       if (!fam_ops->mc2_mce(ec, xec))
+       if (!fam_ops.mc2_mce(ec, xec))
                pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
 }
 
@@ -1102,7 +1130,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
        if (m->tsc)
                pr_emerg(HW_ERR "TSC: %llu\n", m->tsc);
 
-       if (!fam_ops)
+       /* Doesn't matter which member to test. */
+       if (!fam_ops.mc0_mce)
                goto err_code;
 
        switch (m->bank) {
@@ -1157,80 +1186,73 @@ static int __init mce_amd_init(void)
            c->x86_vendor != X86_VENDOR_HYGON)
                return -ENODEV;
 
-       fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
-       if (!fam_ops)
-               return -ENOMEM;
+       if (boot_cpu_has(X86_FEATURE_SMCA)) {
+               xec_mask = 0x3f;
+               goto out;
+       }
 
        switch (c->x86) {
        case 0xf:
-               fam_ops->mc0_mce = k8_mc0_mce;
-               fam_ops->mc1_mce = k8_mc1_mce;
-               fam_ops->mc2_mce = k8_mc2_mce;
+               fam_ops.mc0_mce = k8_mc0_mce;
+               fam_ops.mc1_mce = k8_mc1_mce;
+               fam_ops.mc2_mce = k8_mc2_mce;
                break;
 
        case 0x10:
-               fam_ops->mc0_mce = f10h_mc0_mce;
-               fam_ops->mc1_mce = k8_mc1_mce;
-               fam_ops->mc2_mce = k8_mc2_mce;
+               fam_ops.mc0_mce = f10h_mc0_mce;
+               fam_ops.mc1_mce = k8_mc1_mce;
+               fam_ops.mc2_mce = k8_mc2_mce;
                break;
 
        case 0x11:
-               fam_ops->mc0_mce = k8_mc0_mce;
-               fam_ops->mc1_mce = k8_mc1_mce;
-               fam_ops->mc2_mce = k8_mc2_mce;
+               fam_ops.mc0_mce = k8_mc0_mce;
+               fam_ops.mc1_mce = k8_mc1_mce;
+               fam_ops.mc2_mce = k8_mc2_mce;
                break;
 
        case 0x12:
-               fam_ops->mc0_mce = f12h_mc0_mce;
-               fam_ops->mc1_mce = k8_mc1_mce;
-               fam_ops->mc2_mce = k8_mc2_mce;
+               fam_ops.mc0_mce = f12h_mc0_mce;
+               fam_ops.mc1_mce = k8_mc1_mce;
+               fam_ops.mc2_mce = k8_mc2_mce;
                break;
 
        case 0x14:
-               fam_ops->mc0_mce = cat_mc0_mce;
-               fam_ops->mc1_mce = cat_mc1_mce;
-               fam_ops->mc2_mce = k8_mc2_mce;
+               fam_ops.mc0_mce = cat_mc0_mce;
+               fam_ops.mc1_mce = cat_mc1_mce;
+               fam_ops.mc2_mce = k8_mc2_mce;
                break;
 
        case 0x15:
                xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f;
 
-               fam_ops->mc0_mce = f15h_mc0_mce;
-               fam_ops->mc1_mce = f15h_mc1_mce;
-               fam_ops->mc2_mce = f15h_mc2_mce;
+               fam_ops.mc0_mce = f15h_mc0_mce;
+               fam_ops.mc1_mce = f15h_mc1_mce;
+               fam_ops.mc2_mce = f15h_mc2_mce;
                break;
 
        case 0x16:
                xec_mask = 0x1f;
-               fam_ops->mc0_mce = cat_mc0_mce;
-               fam_ops->mc1_mce = cat_mc1_mce;
-               fam_ops->mc2_mce = f16h_mc2_mce;
+               fam_ops.mc0_mce = cat_mc0_mce;
+               fam_ops.mc1_mce = cat_mc1_mce;
+               fam_ops.mc2_mce = f16h_mc2_mce;
                break;
 
        case 0x17:
        case 0x18:
-               xec_mask = 0x3f;
-               if (!boot_cpu_has(X86_FEATURE_SMCA)) {
-                       printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
-                       goto err_out;
-               }
-               break;
+               pr_warn("Decoding supported only on Scalable MCA processors.\n");
+               return -EINVAL;
 
        default:
                printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
-               goto err_out;
+               return -EINVAL;
        }
 
+out:
        pr_info("MCE: In-kernel MCE decoding enabled.\n");
 
        mce_register_decode_chain(&amd_mce_dec_nb);
 
        return 0;
-
-err_out:
-       kfree(fam_ops);
-       fam_ops = NULL;
-       return -EINVAL;
 }
 early_initcall(mce_amd_init);
 
@@ -1238,7 +1260,6 @@ early_initcall(mce_amd_init);
 static void __exit mce_amd_exit(void)
 {
        mce_unregister_decode_chain(&amd_mce_dec_nb);
-       kfree(fam_ops);
 }
 
 MODULE_DESCRIPTION("AMD MCE decoder");
index c0cc72a..3a3dcb1 100644 (file)
@@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev)
        p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
                                            1, 1, NULL, 0,
                                            edac_device_alloc_index());
-       if (IS_ERR(p->dci))
-               return PTR_ERR(p->dci);
+       if (!p->dci)
+               return -ENOMEM;
 
        p->dci->dev = &pdev->dev;
        p->dci->mod_name = "Sifive ECC Manager";
index 95662a4..99bbaf6 100644 (file)
@@ -256,7 +256,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
 
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
        if (!pdev) {
-               skx_printk(KERN_ERR, "Can't get tolm/tohm\n");
+               edac_dbg(2, "Can't get tolm/tohm\n");
                return -ENODEV;
        }
 
index b696e45..1b3f217 100644 (file)
@@ -131,27 +131,6 @@ static void of_gpio_flags_quirks(struct device_node *np,
                                 enum of_gpio_flags *flags,
                                 int index)
 {
-       /*
-        * Handle MMC "cd-inverted" and "wp-inverted" semantics.
-        */
-       if (IS_ENABLED(CONFIG_MMC)) {
-               /*
-                * Active low is the default according to the
-                * SDHCI specification and the device tree
-                * bindings. However the code in the current
-                * kernel was written such that the phandle
-                * flags were always respected, and "cd-inverted"
-                * would invert the flag from the device phandle.
-                */
-               if (!strcmp(propname, "cd-gpios")) {
-                       if (of_property_read_bool(np, "cd-inverted"))
-                               *flags ^= OF_GPIO_ACTIVE_LOW;
-               }
-               if (!strcmp(propname, "wp-gpios")) {
-                       if (of_property_read_bool(np, "wp-inverted"))
-                               *flags ^= OF_GPIO_ACTIVE_LOW;
-               }
-       }
        /*
         * Some GPIO fixed regulator quirks.
         * Note that active low is the default.
index 78a16e4..bcfbfde 100644 (file)
@@ -3371,6 +3371,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc)
 }
 EXPORT_SYMBOL_GPL(gpiod_is_active_low);
 
+/**
+ * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not
+ * @desc: the gpio descriptor to change
+ */
+void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+       VALIDATE_DESC_VOID(desc);
+       change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+}
+EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
+
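
Together with the of_gpio quirk removal above, this moves the legacy
cd-inverted/wp-inverted handling out of gpiolib: a consumer that knows about
those properties can flip the polarity on its own descriptor. A hedged usage
sketch (the MMC-style caller and property lookup are illustrative, not taken
from this patch):

        desc = devm_gpiod_get(dev, "cd", GPIOD_IN);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        if (of_property_read_bool(np, "cd-inverted"))
                gpiod_toggle_active_low(desc);  /* invert phandle polarity */

change_bit() makes the flip atomic, so it is safe even if the descriptor is
already visible to other code.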
 /* I/O calls are only valid after configuration completed; the relevant
  * "is this a valid GPIO" error checks should already have been done.
  *
index 01a793a..30a1e3a 100644 (file)
@@ -1004,7 +1004,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
 
        /* Renoir */
-       {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 
        /* Navi12 */
        {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
index 5a61a55..e6afe4f 100644 (file)
@@ -1916,73 +1916,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
        return parent_lct + 1;
 }
 
-static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+{
+       switch (pdt) {
+       case DP_PEER_DEVICE_DP_LEGACY_CONV:
+       case DP_PEER_DEVICE_SST_SINK:
+               return true;
+       case DP_PEER_DEVICE_MST_BRANCHING:
+               /* For sst branch device */
+               if (!mcs)
+                       return true;
+
+               return false;
+       }
+       return true;
+}
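
The classifier boils down to a small truth table, which the pdt-handling paths
below now share instead of open-coding case lists:

        /*
         * pdt                            mcs    end device?
         * DP_PEER_DEVICE_DP_LEGACY_CONV  any    yes
         * DP_PEER_DEVICE_SST_SINK        any    yes
         * DP_PEER_DEVICE_MST_BRANCHING   false  yes (SST branch device)
         * DP_PEER_DEVICE_MST_BRANCHING   true   no  (real MST branch)
         */

A branching peer with message_capability_status cleared is a sideband-only SST
branch, so it gets the i2c-over-sideband treatment of an end device rather
than an MSTB of its own.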
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+                   bool new_mcs)
 {
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
        struct drm_dp_mst_branch *mstb;
        u8 rad[8], lct;
        int ret = 0;
 
-       if (port->pdt == new_pdt)
+       if (port->pdt == new_pdt && port->mcs == new_mcs)
                return 0;
 
        /* Teardown the old pdt, if there is one */
-       switch (port->pdt) {
-       case DP_PEER_DEVICE_DP_LEGACY_CONV:
-       case DP_PEER_DEVICE_SST_SINK:
-               /*
-                * If the new PDT would also have an i2c bus, don't bother
-                * with reregistering it
-                */
-               if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-                   new_pdt == DP_PEER_DEVICE_SST_SINK) {
-                       port->pdt = new_pdt;
-                       return 0;
-               }
+       if (port->pdt != DP_PEER_DEVICE_NONE) {
+               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+                       /*
+                        * If the new PDT would also have an i2c bus,
+                        * don't bother with reregistering it
+                        */
+                       if (new_pdt != DP_PEER_DEVICE_NONE &&
+                           drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+                               port->pdt = new_pdt;
+                               port->mcs = new_mcs;
+                               return 0;
+                       }
 
-               /* remove i2c over sideband */
-               drm_dp_mst_unregister_i2c_bus(&port->aux);
-               break;
-       case DP_PEER_DEVICE_MST_BRANCHING:
-               mutex_lock(&mgr->lock);
-               drm_dp_mst_topology_put_mstb(port->mstb);
-               port->mstb = NULL;
-               mutex_unlock(&mgr->lock);
-               break;
+                       /* remove i2c over sideband */
+                       drm_dp_mst_unregister_i2c_bus(&port->aux);
+               } else {
+                       mutex_lock(&mgr->lock);
+                       drm_dp_mst_topology_put_mstb(port->mstb);
+                       port->mstb = NULL;
+                       mutex_unlock(&mgr->lock);
+               }
        }
 
        port->pdt = new_pdt;
-       switch (port->pdt) {
-       case DP_PEER_DEVICE_DP_LEGACY_CONV:
-       case DP_PEER_DEVICE_SST_SINK:
-               /* add i2c over sideband */
-               ret = drm_dp_mst_register_i2c_bus(&port->aux);
-               break;
+       port->mcs = new_mcs;
 
-       case DP_PEER_DEVICE_MST_BRANCHING:
-               lct = drm_dp_calculate_rad(port, rad);
-               mstb = drm_dp_add_mst_branch_device(lct, rad);
-               if (!mstb) {
-                       ret = -ENOMEM;
-                       DRM_ERROR("Failed to create MSTB for port %p", port);
-                       goto out;
-               }
+       if (port->pdt != DP_PEER_DEVICE_NONE) {
+               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+                       /* add i2c over sideband */
+                       ret = drm_dp_mst_register_i2c_bus(&port->aux);
+               } else {
+                       lct = drm_dp_calculate_rad(port, rad);
+                       mstb = drm_dp_add_mst_branch_device(lct, rad);
+                       if (!mstb) {
+                               ret = -ENOMEM;
+                               DRM_ERROR("Failed to create MSTB for port %p",
+                                         port);
+                               goto out;
+                       }
 
-               mutex_lock(&mgr->lock);
-               port->mstb = mstb;
-               mstb->mgr = port->mgr;
-               mstb->port_parent = port;
+                       mutex_lock(&mgr->lock);
+                       port->mstb = mstb;
+                       mstb->mgr = port->mgr;
+                       mstb->port_parent = port;
 
-               /*
-                * Make sure this port's memory allocation stays
-                * around until its child MSTB releases it
-                */
-               drm_dp_mst_get_port_malloc(port);
-               mutex_unlock(&mgr->lock);
+                       /*
+                        * Make sure this port's memory allocation stays
+                        * around until its child MSTB releases it
+                        */
+                       drm_dp_mst_get_port_malloc(port);
+                       mutex_unlock(&mgr->lock);
 
-               /* And make sure we send a link address for this */
-               ret = 1;
-               break;
+                       /* And make sure we send a link address for this */
+                       ret = 1;
+               }
        }
 
 out:
@@ -2135,9 +2152,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
                goto error;
        }
 
-       if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-            port->pdt == DP_PEER_DEVICE_SST_SINK) &&
-           port->port_num >= DP_MST_LOGICAL_PORT_0) {
+       if (port->pdt != DP_PEER_DEVICE_NONE &&
+           drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
                port->cached_edid = drm_get_edid(port->connector,
                                                 &port->aux.ddc);
                drm_connector_set_tile_property(port->connector);
@@ -2201,6 +2217,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
        struct drm_dp_mst_port *port;
        int old_ddps = 0, ret;
        u8 new_pdt = DP_PEER_DEVICE_NONE;
+       bool new_mcs = 0;
        bool created = false, send_link_addr = false, changed = false;
 
        port = drm_dp_get_port(mstb, port_msg->port_number);
@@ -2245,7 +2262,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
        port->input = port_msg->input_port;
        if (!port->input)
                new_pdt = port_msg->peer_device_type;
-       port->mcs = port_msg->mcs;
+       new_mcs = port_msg->mcs;
        port->ddps = port_msg->ddps;
        port->ldps = port_msg->legacy_device_plug_status;
        port->dpcd_rev = port_msg->dpcd_revision;
@@ -2272,7 +2289,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
                }
        }
 
-       ret = drm_dp_port_set_pdt(port, new_pdt);
+       ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
        if (ret == 1) {
                send_link_addr = true;
        } else if (ret < 0) {
@@ -2286,7 +2303,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
         * we're coming out of suspend. In this case, always resend the link
         * address if there's an MSTB on this port
         */
-       if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+       if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+           port->mcs)
                send_link_addr = true;
 
        if (port->connector)
@@ -2323,6 +2341,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        struct drm_dp_mst_port *port;
        int old_ddps, old_input, ret, i;
        u8 new_pdt;
+       bool new_mcs;
        bool dowork = false, create_connector = false;
 
        port = drm_dp_get_port(mstb, conn_stat->port_number);
@@ -2354,7 +2373,6 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        old_ddps = port->ddps;
        old_input = port->input;
        port->input = conn_stat->input_port;
-       port->mcs = conn_stat->message_capability_status;
        port->ldps = conn_stat->legacy_device_plug_status;
        port->ddps = conn_stat->displayport_device_plug_status;
 
@@ -2367,8 +2385,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        }
 
        new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
-
-       ret = drm_dp_port_set_pdt(port, new_pdt);
+       new_mcs = conn_stat->message_capability_status;
+       ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
        if (ret == 1) {
                dowork = true;
        } else if (ret < 0) {
@@ -3929,6 +3947,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
        switch (port->pdt) {
        case DP_PEER_DEVICE_NONE:
        case DP_PEER_DEVICE_MST_BRANCHING:
+               if (!port->mcs)
+                       ret = connector_status_connected;
                break;
 
        case DP_PEER_DEVICE_SST_SINK:
@@ -4541,7 +4561,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
        if (port->connector)
                port->mgr->cbs->destroy_connector(port->mgr, port->connector);
 
-       drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+       drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
        drm_dp_mst_put_port_malloc(port);
 }
 
index 3d4f577..25235ef 100644 (file)
@@ -9,16 +9,16 @@
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
 
-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
 {
-       if (id == (u8)I915_ENGINE_CLASS_INVALID)
+       if (id == (u16)I915_ENGINE_CLASS_INVALID)
                return 0xffff0000u;
 
        GEM_BUG_ON(id >= 16);
        return 0x10000u << id;
 }
 
-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
 {
        /*
         * The uABI guarantees an active writer is also amongst the read
@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
         * last_read - hence we always set both read and write busy for
         * last_write.
         */
-       if (id == (u8)I915_ENGINE_CLASS_INVALID)
+       if (id == (u16)I915_ENGINE_CLASS_INVALID)
                return 0xffffffffu;
 
        return (id + 1) | __busy_read_flag(id);
 }
 
 static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
 {
        const struct i915_request *rq;
 
@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
                return 0;
 
        /* Beware type-expansion follies! */
-       BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+       BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
        return flag(rq->engine->uabi_class);
 }
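
uabi_class and uabi_instance outgrew u8 (see the intel_engine_cs hunk below),
so every flag callback is widened in lockstep and the typecheck() line pins
the width at compile time. A sketch of the guard:

        /* typecheck() emits a compile-time warning (an error under -Werror)
         * if the field's type ever diverges from u16 again: */
        BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
        return flag(rq->engine->uabi_class);   /* flag() now takes u16 */

The (u16)I915_ENGINE_CLASS_INVALID comparisons change for the same reason: the
sentinel must be truncated to the same width as the stored id.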
 
index 4c72d74..0dbb44d 100644 (file)
@@ -402,7 +402,7 @@ struct get_pages_work {
 
 static struct sg_table *
 __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
-                              struct page **pvec, int num_pages)
+                              struct page **pvec, unsigned long num_pages)
 {
        unsigned int max_segment = i915_sg_segment_size();
        struct sg_table *st;
@@ -448,9 +448,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
-       const int npages = obj->base.size >> PAGE_SHIFT;
+       const unsigned long npages = obj->base.size >> PAGE_SHIFT;
+       unsigned long pinned;
        struct page **pvec;
-       int pinned, ret;
+       int ret;
 
        ret = -ENOMEM;
        pinned = 0;
@@ -553,7 +554,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
 
 static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
-       const int num_pages = obj->base.size >> PAGE_SHIFT;
+       const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
        struct mm_struct *mm = obj->userptr.mm->mm;
        struct page **pvec;
        struct sg_table *pages;
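
An int page count overflows for objects of 2^31 pages and up; unsigned long
matches the width of obj->base.size >> PAGE_SHIFT on 64-bit kernels. The
arithmetic that motivates the change:

        /* size = 16 TiB, PAGE_SHIFT = 12:
         *   2^44 bytes >> 12 = 2^32 pages  -> wraps to 0 in a 32-bit int
         * unsigned long on a 64-bit kernel holds the full count. */
        const unsigned long npages = obj->base.size >> PAGE_SHIFT;

pinned gets the same treatment since it counts up toward npages.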
index 17f1f14..2b44647 100644 (file)
@@ -274,8 +274,8 @@ struct intel_engine_cs {
        u8 class;
        u8 instance;
 
-       u8 uabi_class;
-       u8 uabi_instance;
+       u16 uabi_class;
+       u16 uabi_instance;
 
        u32 uabi_capabilities;
        u32 context_size;
index c083f51..4472780 100644 (file)
@@ -1177,6 +1177,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
        pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
        vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
        do {
+               GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
                vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
 
                iter->dma += I915_GTT_PAGE_SIZE;
@@ -1660,6 +1661,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 
        vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
        do {
+               GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
                vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
 
                iter.dma += I915_GTT_PAGE_SIZE;
index f61364f..88b431a 100644 (file)
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
                struct drm_file *file)
 {
+       struct panfrost_file_priv *priv = file->driver_priv;
        struct panfrost_gem_object *bo;
        struct drm_panfrost_create_bo *args = data;
+       struct panfrost_gem_mapping *mapping;
 
        if (!args->size || args->pad ||
            (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
-       args->offset = bo->node.start << PAGE_SHIFT;
+       mapping = panfrost_gem_mapping_get(bo, priv);
+       if (!mapping) {
+               drm_gem_object_put_unlocked(&bo->base.base);
+               return -EINVAL;
+       }
+
+       args->offset = mapping->mmnode.start << PAGE_SHIFT;
+       panfrost_gem_mapping_put(mapping);
 
        return 0;
 }
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
                  struct drm_panfrost_submit *args,
                  struct panfrost_job *job)
 {
+       struct panfrost_file_priv *priv = file_priv->driver_priv;
+       struct panfrost_gem_object *bo;
+       unsigned int i;
+       int ret;
+
        job->bo_count = args->bo_handle_count;
 
        if (!job->bo_count)
@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
        if (!job->implicit_fences)
                return -ENOMEM;
 
-       return drm_gem_objects_lookup(file_priv,
-                                     (void __user *)(uintptr_t)args->bo_handles,
-                                     job->bo_count, &job->bos);
+       ret = drm_gem_objects_lookup(file_priv,
+                                    (void __user *)(uintptr_t)args->bo_handles,
+                                    job->bo_count, &job->bos);
+       if (ret)
+               return ret;
+
+       job->mappings = kvmalloc_array(job->bo_count,
+                                      sizeof(struct panfrost_gem_mapping *),
+                                      GFP_KERNEL | __GFP_ZERO);
+       if (!job->mappings)
+               return -ENOMEM;
+
+       for (i = 0; i < job->bo_count; i++) {
+               struct panfrost_gem_mapping *mapping;
+
+               bo = to_panfrost_bo(job->bos[i]);
+               mapping = panfrost_gem_mapping_get(bo, priv);
+               if (!mapping) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               job->mappings[i] = mapping;
+       }
+
+       return ret;
 }
 
 /**
@@ -320,7 +357,9 @@ out:
 static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
 {
+       struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_get_bo_offset *args = data;
+       struct panfrost_gem_mapping *mapping;
        struct drm_gem_object *gem_obj;
        struct panfrost_gem_object *bo;
 
@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
        }
        bo = to_panfrost_bo(gem_obj);
 
-       args->offset = bo->node.start << PAGE_SHIFT;
-
+       mapping = panfrost_gem_mapping_get(bo, priv);
        drm_gem_object_put_unlocked(gem_obj);
+
+       if (!mapping)
+               return -EINVAL;
+
+       args->offset = mapping->mmnode.start << PAGE_SHIFT;
+       panfrost_gem_mapping_put(mapping);
        return 0;
 }
 
 static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
 {
+       struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_madvise *args = data;
        struct panfrost_device *pfdev = dev->dev_private;
        struct drm_gem_object *gem_obj;
+       struct panfrost_gem_object *bo;
+       int ret = 0;
 
        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                return -ENOENT;
        }
 
+       bo = to_panfrost_bo(gem_obj);
+
        mutex_lock(&pfdev->shrinker_lock);
+       mutex_lock(&bo->mappings.lock);
+       if (args->madv == PANFROST_MADV_DONTNEED) {
+               struct panfrost_gem_mapping *first;
+
+               first = list_first_entry(&bo->mappings.list,
+                                        struct panfrost_gem_mapping,
+                                        node);
+
+               /*
+                * If we want to mark the BO purgeable, there must be only one
+                * user: the caller FD.
+                * We could do something smarter and mark the BO purgeable only
+                * when all its users have marked it purgeable, but globally
+                * visible/shared BOs are likely to never be marked purgeable
+                * anyway, so let's not bother.
+                */
+               if (!list_is_singular(&bo->mappings.list) ||
+                   WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+                       ret = -EINVAL;
+                       goto out_unlock_mappings;
+               }
+       }
+
        args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
 
        if (args->retained) {
-               struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
-
                if (args->madv == PANFROST_MADV_DONTNEED)
                        list_add_tail(&bo->base.madv_list,
                                      &pfdev->shrinker_list);
                else if (args->madv == PANFROST_MADV_WILLNEED)
                        list_del_init(&bo->base.madv_list);
        }
+
+out_unlock_mappings:
+       mutex_unlock(&bo->mappings.lock);
        mutex_unlock(&pfdev->shrinker_lock);
 
        drm_gem_object_put_unlocked(gem_obj);
-       return 0;
+       return ret;
 }
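
DONTNEED is refused unless the calling FD is the buffer's only user:
list_is_singular() proves there is exactly one mapping, and the mmu comparison
proves it belongs to the caller. The guard in isolation:

        first = list_first_entry(&bo->mappings.list,
                                 struct panfrost_gem_mapping, node);
        if (!list_is_singular(&bo->mappings.list) ||
            first->mmu != &priv->mmu)
                return -EINVAL;   /* shared or foreign BO: not purgeable */

As the comment in the hunk says, globally shared BOs are unlikely to ever be
marked purgeable, so the simple check is preferred over tracking per-user
madvise state.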
 
 int panfrost_unstable_ioctl_check(void)
index fd766b1..17b654e 100644 (file)
@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
        list_del_init(&bo->base.madv_list);
        mutex_unlock(&pfdev->shrinker_lock);
 
+       /*
+        * If we still have mappings attached to the BO, there's a problem in
+        * our refcounting.
+        */
+       WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
        if (bo->sgts) {
                int i;
                int n_sgt = bo->base.base.size / SZ_2M;
@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
        drm_gem_shmem_free_object(obj);
 }
 
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+                        struct panfrost_file_priv *priv)
+{
+       struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+       mutex_lock(&bo->mappings.lock);
+       list_for_each_entry(iter, &bo->mappings.list, node) {
+               if (iter->mmu == &priv->mmu) {
+                       kref_get(&iter->refcount);
+                       mapping = iter;
+                       break;
+               }
+       }
+       mutex_unlock(&bo->mappings.lock);
+
+       return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+       struct panfrost_file_priv *priv;
+
+       if (mapping->active)
+               panfrost_mmu_unmap(mapping);
+
+       priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+       spin_lock(&priv->mm_lock);
+       if (drm_mm_node_allocated(&mapping->mmnode))
+               drm_mm_remove_node(&mapping->mmnode);
+       spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+       struct panfrost_gem_mapping *mapping;
+
+       mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+       panfrost_gem_teardown_mapping(mapping);
+       drm_gem_object_put_unlocked(&mapping->obj->base.base);
+       kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+       if (!mapping)
+               return;
+
+       kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+       struct panfrost_gem_mapping *mapping;
+
+       mutex_lock(&bo->mappings.lock);
+       list_for_each_entry(mapping, &bo->mappings.list, node)
+               panfrost_gem_teardown_mapping(mapping);
+       mutex_unlock(&bo->mappings.lock);
+}
+
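
A mapping now lives exactly as long as someone holds a reference to it:
lookups in panfrost_gem_mapping_get() take a kref under bo->mappings.lock, and
the final put funnels into panfrost_gem_mapping_release(), which unmaps, frees
the VA node, and drops the BO reference. The discipline in miniature:

        kref_init(&mapping->refcount);     /* creation: refcount = 1 */
        kref_get(&mapping->refcount);      /* each lookup/user takes one */
        kref_put(&mapping->refcount, panfrost_gem_mapping_release);
                                           /* release() runs once, at zero */

This is what lets a queued job or the fault handler keep a mapping alive even
after the file that created it has closed the BO.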
 int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
        int ret;
@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
        struct panfrost_file_priv *priv = file_priv->driver_priv;
+       struct panfrost_gem_mapping *mapping;
+
+       mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+       if (!mapping)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&mapping->node);
+       kref_init(&mapping->refcount);
+       drm_gem_object_get(obj);
+       mapping->obj = bo;
 
        /*
         * Executable buffers cannot cross a 16MB boundary as the program
@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
        else
                align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
 
-       bo->mmu = &priv->mmu;
+       mapping->mmu = &priv->mmu;
        spin_lock(&priv->mm_lock);
-       ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+       ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
                                         size >> PAGE_SHIFT, align, color, 0);
        spin_unlock(&priv->mm_lock);
        if (ret)
-               return ret;
+               goto err;
 
        if (!bo->is_heap) {
-               ret = panfrost_mmu_map(bo);
-               if (ret) {
-                       spin_lock(&priv->mm_lock);
-                       drm_mm_remove_node(&bo->node);
-                       spin_unlock(&priv->mm_lock);
-               }
+               ret = panfrost_mmu_map(mapping);
+               if (ret)
+                       goto err;
        }
+
+       mutex_lock(&bo->mappings.lock);
+       WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+       list_add_tail(&mapping->node, &bo->mappings.list);
+       mutex_unlock(&bo->mappings.lock);
+
+err:
+       if (ret)
+               panfrost_gem_mapping_put(mapping);
        return ret;
 }
 
 void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
-       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        struct panfrost_file_priv *priv = file_priv->driver_priv;
+       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+       struct panfrost_gem_mapping *mapping = NULL, *iter;
 
-       if (bo->is_mapped)
-               panfrost_mmu_unmap(bo);
+       mutex_lock(&bo->mappings.lock);
+       list_for_each_entry(iter, &bo->mappings.list, node) {
+               if (iter->mmu == &priv->mmu) {
+                       mapping = iter;
+                       list_del(&iter->node);
+                       break;
+               }
+       }
+       mutex_unlock(&bo->mappings.lock);
 
-       spin_lock(&priv->mm_lock);
-       if (drm_mm_node_allocated(&bo->node))
-               drm_mm_remove_node(&bo->node);
-       spin_unlock(&priv->mm_lock);
+       panfrost_gem_mapping_put(mapping);
 }
 
 static int panfrost_gem_pin(struct drm_gem_object *obj)
@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
        if (!obj)
                return NULL;
 
+       INIT_LIST_HEAD(&obj->mappings.list);
+       mutex_init(&obj->mappings.lock);
        obj->base.base.funcs = &panfrost_gem_funcs;
 
        return &obj->base.base;
index 4b17e73..ca1bc90 100644 (file)
@@ -13,23 +13,46 @@ struct panfrost_gem_object {
        struct drm_gem_shmem_object base;
        struct sg_table *sgts;
 
-       struct panfrost_mmu *mmu;
-       struct drm_mm_node node;
-       bool is_mapped          :1;
+       /*
+        * Use a list for now. If searching a mapping ever becomes the
+        * bottleneck, we should consider using an RB-tree, or even better,
+        * let the core store drm_gem_object_mapping entries (where we
+        * could place driver specific data) instead of drm_gem_object ones
+        * in its drm_file->object_idr table.
+        *
+        * struct drm_gem_object_mapping {
+        *      struct drm_gem_object *obj;
+        *      void *driver_priv;
+        * };
+        */
+       struct {
+               struct list_head list;
+               struct mutex lock;
+       } mappings;
+
        bool noexec             :1;
        bool is_heap            :1;
 };
 
+struct panfrost_gem_mapping {
+       struct list_head node;
+       struct kref refcount;
+       struct panfrost_gem_object *obj;
+       struct drm_mm_node mmnode;
+       struct panfrost_mmu *mmu;
+       bool active             :1;
+};
+
 static inline
 struct  panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
 {
        return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
 }
 
-static inline
-struct  panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
 {
-       return container_of(node, struct panfrost_gem_object, node);
+       return container_of(node, struct panfrost_gem_mapping, mmnode);
 }
 
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void panfrost_gem_close(struct drm_gem_object *obj,
                        struct drm_file *file_priv);
 
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+                        struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
 void panfrost_gem_shrinker_init(struct drm_device *dev);
 void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
 
index 458f0fa..f5dd7b2 100644 (file)
@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
 static bool panfrost_gem_purge(struct drm_gem_object *obj)
 {
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 
        if (!mutex_trylock(&shmem->pages_lock))
                return false;
 
-       panfrost_mmu_unmap(to_panfrost_bo(obj));
+       panfrost_gem_teardown_mappings(bo);
        drm_gem_shmem_purge_locked(obj);
 
        mutex_unlock(&shmem->pages_lock);
index d411eb6..e364ee0 100644 (file)
@@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref)
        dma_fence_put(job->done_fence);
        dma_fence_put(job->render_done_fence);
 
-       if (job->bos) {
+       if (job->mappings) {
                for (i = 0; i < job->bo_count; i++)
+                       panfrost_gem_mapping_put(job->mappings[i]);
+               kvfree(job->mappings);
+       }
+
+       if (job->bos) {
+               struct panfrost_gem_object *bo;
+
+               for (i = 0; i < job->bo_count; i++) {
+                       bo = to_panfrost_bo(job->bos[i]);
                        drm_gem_object_put_unlocked(job->bos[i]);
+               }
+
                kvfree(job->bos);
        }
 
index 6245412..bbd3ba9 100644 (file)
@@ -32,6 +32,7 @@ struct panfrost_job {
 
        /* Exclusive fences we have taken from the BOs to wait for */
        struct dma_fence **implicit_fences;
+       struct panfrost_gem_mapping **mappings;
        struct drm_gem_object **bos;
        u32 bo_count;
 
index a3ed64a..763cfca 100644 (file)
@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
        return 0;
 }
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
 {
+       struct panfrost_gem_object *bo = mapping->obj;
        struct drm_gem_object *obj = &bo->base.base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
        struct sg_table *sgt;
        int prot = IOMMU_READ | IOMMU_WRITE;
 
-       if (WARN_ON(bo->is_mapped))
+       if (WARN_ON(mapping->active))
                return 0;
 
        if (bo->noexec)
@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
        if (WARN_ON(IS_ERR(sgt)))
                return PTR_ERR(sgt);
 
-       mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
-       bo->is_mapped = true;
+       mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+                  prot, sgt);
+       mapping->active = true;
 
        return 0;
 }
 
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
 {
+       struct panfrost_gem_object *bo = mapping->obj;
        struct drm_gem_object *obj = &bo->base.base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-       struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
-       u64 iova = bo->node.start << PAGE_SHIFT;
-       size_t len = bo->node.size << PAGE_SHIFT;
+       struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+       u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+       size_t len = mapping->mmnode.size << PAGE_SHIFT;
        size_t unmapped_len = 0;
 
-       if (WARN_ON(!bo->is_mapped))
+       if (WARN_ON(!mapping->active))
                return;
 
-       dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+       dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+               mapping->mmu->as, iova, len);
 
        while (unmapped_len < len) {
                size_t unmapped_page;
@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
                unmapped_len += pgsize;
        }
 
-       panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
-       bo->is_mapped = false;
+       panfrost_mmu_flush_range(pfdev, mapping->mmu,
+                                mapping->mmnode.start << PAGE_SHIFT, len);
+       mapping->active = false;
 }
 
 static void mmu_tlb_inv_context_s1(void *cookie)
@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
        free_io_pgtable_ops(mmu->pgtbl_ops);
 }
 
-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
 {
-       struct panfrost_gem_object *bo = NULL;
+       struct panfrost_gem_mapping *mapping = NULL;
        struct panfrost_file_priv *priv;
        struct drm_mm_node *node;
        u64 offset = addr >> PAGE_SHIFT;
@@ -418,8 +423,9 @@ found_mmu:
        drm_mm_for_each_node(node, &priv->mm) {
                if (offset >= node->start &&
                    offset < (node->start + node->size)) {
-                       bo = drm_mm_node_to_panfrost_bo(node);
-                       drm_gem_object_get(&bo->base.base);
+                       mapping = drm_mm_node_to_panfrost_mapping(node);
+
+                       kref_get(&mapping->refcount);
                        break;
                }
        }
@@ -427,7 +433,7 @@ found_mmu:
        spin_unlock(&priv->mm_lock);
 out:
        spin_unlock(&pfdev->as_lock);
-       return bo;
+       return mapping;
 }
 
 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                                       u64 addr)
 {
        int ret, i;
+       struct panfrost_gem_mapping *bomapping;
        struct panfrost_gem_object *bo;
        struct address_space *mapping;
        pgoff_t page_offset;
        struct sg_table *sgt;
        struct page **pages;
 
-       bo = addr_to_drm_mm_node(pfdev, as, addr);
-       if (!bo)
+       bomapping = addr_to_mapping(pfdev, as, addr);
+       if (!bomapping)
                return -ENOENT;
 
+       bo = bomapping->obj;
        if (!bo->is_heap) {
                dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
-                        bo->node.start << PAGE_SHIFT);
+                        bomapping->mmnode.start << PAGE_SHIFT);
                ret = -EINVAL;
                goto err_bo;
        }
-       WARN_ON(bo->mmu->as != as);
+       WARN_ON(bomapping->mmu->as != as);
 
        /* Assume 2MB alignment and size multiple */
        addr &= ~((u64)SZ_2M - 1);
        page_offset = addr >> PAGE_SHIFT;
-       page_offset -= bo->node.start;
+       page_offset -= bomapping->mmnode.start;
 
        mutex_lock(&bo->base.pages_lock);
 
@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                goto err_map;
        }
 
-       mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+       mmu_map_sg(pfdev, bomapping->mmu, addr,
+                  IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
 
-       bo->is_mapped = true;
+       bomapping->active = true;
 
        dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
-       drm_gem_object_put_unlocked(&bo->base.base);
+       panfrost_gem_mapping_put(bomapping);
 
        return 0;
 
index 7c5b677..44fc2ed 100644 (file)
@@ -4,12 +4,12 @@
 #ifndef __PANFROST_MMU_H__
 #define __PANFROST_MMU_H__
 
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
 struct panfrost_file_priv;
 struct panfrost_mmu;
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
 
 int panfrost_mmu_init(struct panfrost_device *pfdev);
 void panfrost_mmu_fini(struct panfrost_device *pfdev);
index 2c04e85..6848204 100644 (file)
@@ -25,7 +25,7 @@
 #define V4_SHADERS_PER_COREGROUP       4
 
 struct panfrost_perfcnt {
-       struct panfrost_gem_object *bo;
+       struct panfrost_gem_mapping *mapping;
        size_t bosize;
        void *buf;
        struct panfrost_file_priv *user;
@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
        int ret;
 
        reinit_completion(&pfdev->perfcnt->dump_comp);
-       gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+       gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
        gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
        gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
        gpu_write(pfdev, GPU_INT_CLEAR,
@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
-       perfcnt->bo = to_panfrost_bo(&bo->base);
-
        /* Map the perfcnt buf in the address space attached to file_priv. */
-       ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
+       ret = panfrost_gem_open(&bo->base, file_priv);
        if (ret)
                goto err_put_bo;
 
+       perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+                                                   user);
+       if (!perfcnt->mapping) {
+               ret = -EINVAL;
+               goto err_close_bo;
+       }
+
        perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
        if (IS_ERR(perfcnt->buf)) {
                ret = PTR_ERR(perfcnt->buf);
-               goto err_close_bo;
+               goto err_put_mapping;
        }
 
        /*
@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
        if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
                gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
 
+       /* The BO ref is retained by the mapping. */
+       drm_gem_object_put_unlocked(&bo->base);
+
        return 0;
 
 err_vunmap:
-       drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+       drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+       panfrost_gem_mapping_put(perfcnt->mapping);
 err_close_bo:
-       panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+       panfrost_gem_close(&bo->base, file_priv);
 err_put_bo:
        drm_gem_object_put_unlocked(&bo->base);
        return ret;
@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
                  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
 
        perfcnt->user = NULL;
-       drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+       drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
        perfcnt->buf = NULL;
-       panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
-       drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
-       perfcnt->bo = NULL;
+       panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+       panfrost_gem_mapping_put(perfcnt->mapping);
+       perfcnt->mapping = NULL;
        pm_runtime_mark_last_busy(pfdev->dev);
        pm_runtime_put_autosuspend(pfdev->dev);
 
index cd91930..70e1cb9 100644 (file)
@@ -49,6 +49,10 @@ MODULE_PARM_DESC(disable_tap_to_click,
 #define HIDPP_REPORT_LONG_LENGTH               20
 #define HIDPP_REPORT_VERY_LONG_MAX_LENGTH      64
 
+#define HIDPP_REPORT_SHORT_SUPPORTED           BIT(0)
+#define HIDPP_REPORT_LONG_SUPPORTED            BIT(1)
+#define HIDPP_REPORT_VERY_LONG_SUPPORTED       BIT(2)
+
 #define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS      0x03
 #define HIDPP_SUB_ID_ROLLER                    0x05
 #define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS          0x06
@@ -87,6 +91,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
 #define HIDPP_CAPABILITY_HIDPP20_BATTERY       BIT(1)
 #define HIDPP_CAPABILITY_BATTERY_MILEAGE       BIT(2)
 #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS  BIT(3)
+#define HIDPP_CAPABILITY_BATTERY_VOLTAGE       BIT(4)
 
 /*
  * There are two hidpp protocols in use, the first version hidpp10 is known
@@ -135,12 +140,15 @@ struct hidpp_report {
 struct hidpp_battery {
        u8 feature_index;
        u8 solar_feature_index;
+       u8 voltage_feature_index;
        struct power_supply_desc desc;
        struct power_supply *ps;
        char name[64];
        int status;
        int capacity;
        int level;
+       int voltage;
+       int charge_type;
        bool online;
 };
 
@@ -183,9 +191,12 @@ struct hidpp_device {
 
        unsigned long quirks;
        unsigned long capabilities;
+       u8 supported_reports;
 
        struct hidpp_battery battery;
        struct hidpp_scroll_counter vertical_wheel_counter;
+
+       u8 wireless_feature_index;
 };
 
 /* HID++ 1.0 error codes */
@@ -340,6 +351,11 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
        struct hidpp_report *message;
        int ret, max_count;
 
+       /* Send as long report if short reports are not supported. */
+       if (report_id == REPORT_ID_HIDPP_SHORT &&
+           !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED))
+               report_id = REPORT_ID_HIDPP_LONG;
+
        switch (report_id) {
        case REPORT_ID_HIDPP_SHORT:
                max_count = HIDPP_REPORT_SHORT_LENGTH - 4;
@@ -393,10 +409,13 @@ static inline bool hidpp_match_error(struct hidpp_report *question,
            (answer->fap.params[0] == question->fap.funcindex_clientid);
 }
 
-static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
+static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp,
+               struct hidpp_report *report)
 {
-       return (report->report_id == REPORT_ID_HIDPP_SHORT) &&
-               (report->rap.sub_id == 0x41);
+       return (hidpp->wireless_feature_index &&
+               (report->fap.feature_index == hidpp->wireless_feature_index)) ||
+               ((report->report_id == REPORT_ID_HIDPP_SHORT) &&
+               (report->rap.sub_id == 0x41));
 }
 
 /**
@@ -1222,6 +1241,144 @@ static int hidpp20_battery_event(struct hidpp_device *hidpp,
        return 0;
 }
 
+/* -------------------------------------------------------------------------- */
+/* 0x1001: Battery voltage                                                    */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001
+
+#define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00
+
+#define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00
+
+static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
+                                               int *level, int *charge_type)
+{
+       int status;
+       long charge_sts = (long)data[2];
+
+       *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+       switch (data[2] & 0xe0) {
+       case 0x00:
+               status = POWER_SUPPLY_STATUS_CHARGING;
+               break;
+       case 0x20:
+               status = POWER_SUPPLY_STATUS_FULL;
+               *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+               break;
+       case 0x40:
+               status = POWER_SUPPLY_STATUS_DISCHARGING;
+               break;
+       case 0xe0:
+               status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+               break;
+       default:
+               status = POWER_SUPPLY_STATUS_UNKNOWN;
+       }
+
+       *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+       if (test_bit(3, &charge_sts))
+               *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+       if (test_bit(4, &charge_sts))
+               *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+
+       if (test_bit(5, &charge_sts))
+               *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+
+       *voltage = get_unaligned_be16(data);
+
+       return status;
+}
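
To make the payload layout concrete, here is a minimal stand-alone sketch of the same decode, assuming only the three-byte format visible in the driver code above (big-endian millivolts in bytes 0-1, flags in byte 2; the sample bytes and names are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative decode of the 3-byte 0x1001 payload: bytes 0-1 carry the
 * voltage in mV (big endian), byte 2 carries the charge status flags.
 * Bit 5 additionally flags a critically low level in the driver's reading. */
static void decode_battery_voltage(const uint8_t data[3])
{
        unsigned int voltage_mv = (data[0] << 8) | data[1];
        const char *status;

        switch (data[2] & 0xe0) {       /* top three bits: charge status */
        case 0x00: status = "charging"; break;
        case 0x20: status = "full"; break;
        case 0x40: status = "discharging"; break;
        case 0xe0: status = "not charging"; break;
        default:   status = "unknown"; break;
        }

        printf("%u mV, %s%s%s\n", voltage_mv, status,
               (data[2] & 0x08) ? ", fast charge" : "",
               (data[2] & 0x10) ? ", trickle charge" : "");
}

int main(void)
{
        const uint8_t sample[3] = { 0x0f, 0xa0, 0x00 }; /* 4000 mV, charging */

        decode_battery_voltage(sample);
        return 0;
}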
+
+static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp,
+                                                u8 feature_index,
+                                                int *status, int *voltage,
+                                                int *level, int *charge_type)
+{
+       struct hidpp_report response;
+       int ret;
+       u8 *params = (u8 *)response.fap.params;
+
+       ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+                                         CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE,
+                                         NULL, 0, &response);
+
+       if (ret > 0) {
+               hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+                       __func__, ret);
+               return -EPROTO;
+       }
+       if (ret)
+               return ret;
+
+       hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE;
+
+       *status = hidpp20_battery_map_status_voltage(params, voltage,
+                                                    level, charge_type);
+
+       return 0;
+}
+
+static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp)
+{
+       u8 feature_type;
+       int ret;
+       int status, voltage, level, charge_type;
+
+       if (hidpp->battery.voltage_feature_index == 0xff) {
+               ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE,
+                                            &hidpp->battery.voltage_feature_index,
+                                            &feature_type);
+               if (ret)
+                       return ret;
+       }
+
+       ret = hidpp20_battery_get_battery_voltage(hidpp,
+                                                 hidpp->battery.voltage_feature_index,
+                                                 &status, &voltage, &level, &charge_type);
+
+       if (ret)
+               return ret;
+
+       hidpp->battery.status = status;
+       hidpp->battery.voltage = voltage;
+       hidpp->battery.level = level;
+       hidpp->battery.charge_type = charge_type;
+       hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+       return 0;
+}
+
+static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp,
+                                           u8 *data, int size)
+{
+       struct hidpp_report *report = (struct hidpp_report *)data;
+       int status, voltage, level, charge_type;
+
+       if (report->fap.feature_index != hidpp->battery.voltage_feature_index ||
+               report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST)
+               return 0;
+
+       status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage,
+                                                   &level, &charge_type);
+
+       hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+       if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) {
+               hidpp->battery.voltage = voltage;
+               hidpp->battery.status = status;
+               hidpp->battery.level = level;
+               hidpp->battery.charge_type = charge_type;
+               if (hidpp->battery.ps)
+                       power_supply_changed(hidpp->battery.ps);
+       }
+       return 0;
+}
+
 static enum power_supply_property hidpp_battery_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
        POWER_SUPPLY_PROP_STATUS,
@@ -1231,6 +1388,7 @@ static enum power_supply_property hidpp_battery_props[] = {
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
        0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */
        0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */
+       0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */
 };
 
 static int hidpp_battery_get_property(struct power_supply *psy,
@@ -1268,6 +1426,13 @@ static int hidpp_battery_get_property(struct power_supply *psy,
                case POWER_SUPPLY_PROP_SERIAL_NUMBER:
                        val->strval = hidpp->hid_dev->uniq;
                        break;
+               case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+                       /* hardware reports voltage in mV; sysfs expects uV */
+                       val->intval = hidpp->battery.voltage * 1000;
+                       break;
+               case POWER_SUPPLY_PROP_CHARGE_TYPE:
+                       val->intval = hidpp->battery.charge_type;
+                       break;
                default:
                        ret = -EINVAL;
                        break;
@@ -1276,6 +1441,24 @@ static int hidpp_battery_get_property(struct power_supply *psy,
        return ret;
 }
 
+/* -------------------------------------------------------------------------- */
+/* 0x1d4b: Wireless device status                                             */
+/* -------------------------------------------------------------------------- */
+#define HIDPP_PAGE_WIRELESS_DEVICE_STATUS                      0x1d4b
+
+static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
+{
+       u8 feature_type;
+       int ret;
+
+       ret = hidpp_root_get_feature(hidpp,
+                                    HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+                                    &hidpp->wireless_feature_index,
+                                    &feature_type);
+
+       return ret;
+}
+
 /* -------------------------------------------------------------------------- */
 /* 0x2120: Hi-resolution scrolling                                            */
 /* -------------------------------------------------------------------------- */
@@ -3091,7 +3274,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
                }
        }
 
-       if (unlikely(hidpp_report_is_connect_event(report))) {
+       if (unlikely(hidpp_report_is_connect_event(hidpp, report))) {
                atomic_set(&hidpp->connected,
                                !(report->rap.params[0] & (1 << 6)));
                if (schedule_work(&hidpp->work) == 0)
@@ -3106,6 +3289,9 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
                ret = hidpp_solar_battery_event(hidpp, data, size);
                if (ret != 0)
                        return ret;
+               ret = hidpp20_battery_voltage_event(hidpp, data, size);
+               if (ret != 0)
+                       return ret;
        }
 
        if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
@@ -3227,12 +3413,16 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
 
        hidpp->battery.feature_index = 0xff;
        hidpp->battery.solar_feature_index = 0xff;
+       hidpp->battery.voltage_feature_index = 0xff;
 
        if (hidpp->protocol_major >= 2) {
                if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750)
                        ret = hidpp_solar_request_battery_event(hidpp);
-               else
-                       ret = hidpp20_query_battery_info(hidpp);
+               else {
+                       ret = hidpp20_query_battery_voltage_info(hidpp);
+                       if (ret)
+                               ret = hidpp20_query_battery_info(hidpp);
+               }
 
                if (ret)
                        return ret;
@@ -3257,7 +3447,7 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
        if (!battery_props)
                return -ENOMEM;
 
-       num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
+       num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3;
 
        if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
                battery_props[num_battery_props++] =
@@ -3267,6 +3457,10 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
                battery_props[num_battery_props++] =
                                POWER_SUPPLY_PROP_CAPACITY_LEVEL;
 
+       if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+               battery_props[num_battery_props++] =
+                       POWER_SUPPLY_PROP_VOLTAGE_NOW;
+
        battery = &hidpp->battery;
 
        n = atomic_inc_return(&battery_no) - 1;
@@ -3430,7 +3624,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
                else
                        hidpp10_query_battery_status(hidpp);
        } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
-               hidpp20_query_battery_info(hidpp);
+               if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+                       hidpp20_query_battery_voltage_info(hidpp);
+               else
+                       hidpp20_query_battery_info(hidpp);
        }
        if (hidpp->battery.ps)
                power_supply_changed(hidpp->battery.ps);
@@ -3481,10 +3678,11 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id)
        return report->field[0]->report_count + 1;
 }
 
-static bool hidpp_validate_device(struct hid_device *hdev)
+static u8 hidpp_validate_device(struct hid_device *hdev)
 {
        struct hidpp_device *hidpp = hid_get_drvdata(hdev);
-       int id, report_length, supported_reports = 0;
+       int id, report_length;
+       u8 supported_reports = 0;
 
        id = REPORT_ID_HIDPP_SHORT;
        report_length = hidpp_get_report_length(hdev, id);
@@ -3492,7 +3690,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
                if (report_length < HIDPP_REPORT_SHORT_LENGTH)
                        goto bad_device;
 
-               supported_reports++;
+               supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED;
        }
 
        id = REPORT_ID_HIDPP_LONG;
@@ -3501,7 +3699,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
                if (report_length < HIDPP_REPORT_LONG_LENGTH)
                        goto bad_device;
 
-               supported_reports++;
+               supported_reports |= HIDPP_REPORT_LONG_SUPPORTED;
        }
 
        id = REPORT_ID_HIDPP_VERY_LONG;
@@ -3511,7 +3709,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
                    report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
                        goto bad_device;
 
-               supported_reports++;
+               supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED;
                hidpp->very_long_report_length = report_length;
        }
 
@@ -3560,7 +3758,9 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
        /*
         * Make sure the device is HID++ capable, otherwise treat as generic HID
         */
-       if (!hidpp_validate_device(hdev)) {
+       hidpp->supported_reports = hidpp_validate_device(hdev);
+
+       if (!hidpp->supported_reports) {
                hid_set_drvdata(hdev, NULL);
                devm_kfree(&hdev->dev, hidpp);
                return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
@@ -3617,7 +3817,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (ret < 0) {
                dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
                        __func__, ret);
-               hid_hw_stop(hdev);
                goto hid_hw_open_fail;
        }
 
@@ -3639,6 +3838,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                hidpp_overwrite_name(hdev);
        }
 
+       if (connected && hidpp->protocol_major >= 2) {
+               ret = hidpp_set_wireless_feature_index(hidpp);
+               if (ret == -ENOENT)
+                       hidpp->wireless_feature_index = 0;
+               else if (ret)
+                       goto hid_hw_init_fail;
+       }
+
        if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
                ret = wtp_get_config(hidpp);
                if (ret)
@@ -3752,6 +3959,8 @@ static const struct hid_device_id hidpp_devices[] = {
        { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
        { /* Mouse Logitech MX Master 2S */
          LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* Mouse Logitech MX Master 3 */
+         LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
        { /* Mouse Logitech Performance MX */
          LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
        { /* Keyboard logitech K400 */
@@ -3808,6 +4017,14 @@ static const struct hid_device_id hidpp_devices[] = {
        { /* MX5500 keyboard over Bluetooth */
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
          .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+       { /* MX Master mouse over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
+         .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
+         .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* MX Master 3 mouse over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023),
+         .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
        {}
 };
 
index 7a75aff..2eee5e3 100644 (file)
@@ -451,6 +451,15 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
                                                -EFAULT : len;
                                        break;
                                }
+
+                               if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) {
+                                       int len = strlen(hid->uniq) + 1;
+                                       if (len > _IOC_SIZE(cmd))
+                                               len = _IOC_SIZE(cmd);
+                                       ret = copy_to_user(user_arg, hid->uniq, len) ?
+                                               -EFAULT : len;
+                                       break;
+                               }
                        }
 
                ret = -ENOTTY;
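
User space reaches the new ioctl the same way as HIDIOCGRAWNAME/HIDIOCGRAWPHYS; a hedged sketch, assuming kernel headers new enough to define HIDIOCGRAWUNIQ and an illustrative /dev/hidraw0 node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hidraw.h>

int main(void)
{
        char uniq[64] = "";
        int fd = open("/dev/hidraw0", O_RDONLY);        /* illustrative node */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Truncates to the given buffer size, like the name/phys ioctls. */
        if (ioctl(fd, HIDIOCGRAWUNIQ(sizeof(uniq)), uniq) < 0)
                perror("HIDIOCGRAWUNIQ");
        else
                printf("uniq: %s\n", uniq);
        close(fd);
        return 0;
}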
index 23dfe84..47ac20a 100644 (file)
@@ -164,6 +164,16 @@ config SENSORS_ADM1031
          This driver can also be built as a module. If so, the module
          will be called adm1031.
 
+config SENSORS_ADM1177
+       tristate "Analog Devices ADM1177 and compatibles"
+       depends on I2C
+       help
+         If you say yes here you get support for Analog Devices ADM1177
+         sensor chips.
+
+         This driver can also be built as a module.  If so, the module
+         will be called adm1177.
+
 config SENSORS_ADM9240
        tristate "Analog Devices ADM9240 and compatibles"
        depends on I2C
@@ -385,6 +395,16 @@ config SENSORS_ATXP1
          This driver can also be built as a module. If so, the module
          will be called atxp1.
 
+config SENSORS_DRIVETEMP
+       tristate "Hard disk drives with temperature sensors"
+       depends on SCSI && ATA
+       help
+         If you say yes here you get support for the temperature sensor on
+         hard disk drives.
+
+         This driver can also be built as a module. If so, the module
+         will be called drivetemp.
+
 config SENSORS_DS620
        tristate "Dallas Semiconductor DS620"
        depends on I2C
@@ -889,7 +909,7 @@ config SENSORS_MAX197
          will be called max197.
 
 config SENSORS_MAX31722
-tristate "MAX31722 temperature sensor"
+       tristate "MAX31722 temperature sensor"
        depends on SPI
        help
          Support for the Maxim Integrated MAX31722/MAX31723 digital
@@ -898,6 +918,16 @@ tristate "MAX31722 temperature sensor"
          This driver can also be built as a module. If so, the module
          will be called max31722.
 
+config SENSORS_MAX31730
+       tristate "MAX31730 temperature sensor"
+       depends on I2C
+       help
+         Support for the Maxim Integrated MAX31730 3-Channel Remote
+         Temperature Sensor.
+
+         This driver can also be built as a module. If so, the module
+         will be called max31730.
+
 config SENSORS_MAX6621
        tristate "Maxim MAX6621 sensor chip"
        depends on I2C
@@ -1905,7 +1935,7 @@ config SENSORS_W83627HF
          will be called w83627hf.
 
 config SENSORS_W83627EHF
-       tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F"
+       tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG"
        depends on !PPC
        select HWMON_VID
        help
@@ -1918,8 +1948,7 @@ config SENSORS_W83627EHF
          the Core 2 Duo. And also the W83627UHG, which is a stripped down
          version of the W83627DHG (as far as hardware monitoring goes.)
 
-         This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F
-         (also known as W83667HG-I), and NCT6776F.
+         This driver also supports Nuvoton W83667HG and W83667HG-B.
 
          This driver can also be built as a module. If so, the module
          will be called w83627ehf.
index 6db5db9..613f509 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
 obj-$(CONFIG_SENSORS_ADM1026)  += adm1026.o
 obj-$(CONFIG_SENSORS_ADM1029)  += adm1029.o
 obj-$(CONFIG_SENSORS_ADM1031)  += adm1031.o
+obj-$(CONFIG_SENSORS_ADM1177)  += adm1177.o
 obj-$(CONFIG_SENSORS_ADM9240)  += adm9240.o
 obj-$(CONFIG_SENSORS_ADS7828)  += ads7828.o
 obj-$(CONFIG_SENSORS_ADS7871)  += ads7871.o
@@ -56,6 +57,7 @@ obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
 obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
 obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o
 obj-$(CONFIG_SENSORS_DME1737)  += dme1737.o
+obj-$(CONFIG_SENSORS_DRIVETEMP)        += drivetemp.o
 obj-$(CONFIG_SENSORS_DS620)    += ds620.o
 obj-$(CONFIG_SENSORS_DS1621)   += ds1621.o
 obj-$(CONFIG_SENSORS_EMC1403)  += emc1403.o
@@ -123,6 +125,7 @@ obj-$(CONFIG_SENSORS_MAX1619)       += max1619.o
 obj-$(CONFIG_SENSORS_MAX1668)  += max1668.o
 obj-$(CONFIG_SENSORS_MAX197)   += max197.o
 obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
+obj-$(CONFIG_SENSORS_MAX31730) += max31730.o
 obj-$(CONFIG_SENSORS_MAX6621)  += max6621.o
 obj-$(CONFIG_SENSORS_MAX6639)  += max6639.o
 obj-$(CONFIG_SENSORS_MAX6642)  += max6642.o
diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c
new file mode 100644 (file)
index 0000000..d314223
--- /dev/null
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADM1177 Hot Swap Controller and Digital Power Monitor with Soft Start Pin
+ *
+ * Copyright 2015-2019 Analog Devices Inc.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+/* Command Byte Operations */
+#define ADM1177_CMD_V_CONT     BIT(0)
+#define ADM1177_CMD_I_CONT     BIT(2)
+#define ADM1177_CMD_VRANGE     BIT(4)
+
+/* Extended Register */
+#define ADM1177_REG_ALERT_TH   2
+
+#define ADM1177_BITS           12
+
+/**
+ * struct adm1177_state - driver instance specific data
+ * @client:            pointer to i2c client
+ * @reg:               regulator info for the power supply of the device
+ * @r_sense_uohm:      current sense resistor value
+ * @alert_threshold_ua: current limit for shutdown
+ * @vrange_high:       internal voltage divider
+ */
+struct adm1177_state {
+       struct i2c_client       *client;
+       struct regulator        *reg;
+       u32                     r_sense_uohm;
+       u32                     alert_threshold_ua;
+       bool                    vrange_high;
+};
+
+static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data)
+{
+       return i2c_master_recv(st->client, data, num);
+}
+
+static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd)
+{
+       return i2c_smbus_write_byte(st->client, cmd);
+}
+
+static int adm1177_write_alert_thr(struct adm1177_state *st,
+                                  u32 alert_threshold_ua)
+{
+       u64 val;
+       int ret;
+
+       val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm;
+       val = div_u64(val, 105840000U);
+       val = div_u64(val, 1000U);
+       if (val > 0xFF)
+               val = 0xFF;
+
+       ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH,
+                                       val);
+       if (ret)
+               return ret;
+
+       st->alert_threshold_ua = alert_threshold_ua;
+       return 0;
+}
+
+static int adm1177_read(struct device *dev, enum hwmon_sensor_types type,
+                       u32 attr, int channel, long *val)
+{
+       struct adm1177_state *st = dev_get_drvdata(dev);
+       u8 data[3];
+       long dummy;
+       int ret;
+
+       switch (type) {
+       case hwmon_curr:
+               switch (attr) {
+               case hwmon_curr_input:
+                       ret = adm1177_read_raw(st, 3, data);
+                       if (ret < 0)
+                               return ret;
+                       dummy = (data[1] << 4) | (data[2] & 0xF);
+                       /*
+                        * convert to milliamperes
+                        * ((105.84mV / 4096) x raw) / senseResistor(ohm)
+                        */
+                       *val = div_u64((105840000ull * dummy),
+                                      4096 * st->r_sense_uohm);
+                       return 0;
+               case hwmon_curr_max_alarm:
+                       *val = st->alert_threshold_ua;
+                       return 0;
+               default:
+                       return -EOPNOTSUPP;
+               }
+       case hwmon_in:
+               ret = adm1177_read_raw(st, 3, data);
+               if (ret < 0)
+                       return ret;
+               dummy = (data[0] << 4) | (data[2] >> 4);
+               /*
+                * convert to millivolts based on resistor division
+                * (V_fullscale / 4096) * raw
+                */
+               if (st->vrange_high)
+                       dummy *= 26350;
+               else
+                       dummy *= 6650;
+
+               *val = DIV_ROUND_CLOSEST(dummy, 4096);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
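
A stand-alone sketch of the raw-to-physical arithmetic used in the read path above, with made-up sample bytes and a hypothetical 50 mOhm shunt (constants and bit packing are taken from the driver code; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t data[3] = { 0x80, 0x40, 0x12 };   /* made-up sample */
        const uint32_t r_sense_uohm = 50000;            /* hypothetical 50 mOhm shunt */
        const int vrange_high = 0;

        /* 12-bit results: voltage in data[0] plus the high nibble of data[2],
         * current in data[1] plus the low nibble of data[2]. */
        unsigned int vraw = (data[0] << 4) | (data[2] >> 4);
        unsigned int iraw = (data[1] << 4) | (data[2] & 0xf);

        /* (V_fullscale / 4096) * raw, full scale 26.35 V or 6.65 V */
        unsigned int mv = (vraw * (vrange_high ? 26350u : 6650u)) / 4096;
        /* ((105.84 mV / 4096) * raw) / R_sense[uohm] yields mA */
        unsigned long long ma = (105840000ull * iraw) / (4096ull * r_sense_uohm);

        printf("%u mV, %llu mA\n", mv, ma);
        return 0;
}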
+
+static int adm1177_write(struct device *dev, enum hwmon_sensor_types type,
+                        u32 attr, int channel, long val)
+{
+       struct adm1177_state *st = dev_get_drvdata(dev);
+
+       switch (type) {
+       case hwmon_curr:
+               switch (attr) {
+               case hwmon_curr_max_alarm:
+                       adm1177_write_alert_thr(st, val);
+                       return 0;
+               default:
+                       return -EOPNOTSUPP;
+               }
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static umode_t adm1177_is_visible(const void *data,
+                                 enum hwmon_sensor_types type,
+                                 u32 attr, int channel)
+{
+       const struct adm1177_state *st = data;
+
+       switch (type) {
+       case hwmon_in:
+               switch (attr) {
+               case hwmon_in_input:
+                       return 0444;
+               }
+               break;
+       case hwmon_curr:
+               switch (attr) {
+               case hwmon_curr_input:
+                       if (st->r_sense_uohm)
+                               return 0444;
+                       return 0;
+               case hwmon_curr_max_alarm:
+                       if (st->r_sense_uohm)
+                               return 0644;
+                       return 0;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static const struct hwmon_channel_info *adm1177_info[] = {
+       HWMON_CHANNEL_INFO(curr,
+                          HWMON_C_INPUT | HWMON_C_MAX_ALARM),
+       HWMON_CHANNEL_INFO(in,
+                          HWMON_I_INPUT),
+       NULL
+};
+
+static const struct hwmon_ops adm1177_hwmon_ops = {
+       .is_visible = adm1177_is_visible,
+       .read = adm1177_read,
+       .write = adm1177_write,
+};
+
+static const struct hwmon_chip_info adm1177_chip_info = {
+       .ops = &adm1177_hwmon_ops,
+       .info = adm1177_info,
+};
+
+static void adm1177_remove(void *data)
+{
+       struct adm1177_state *st = data;
+
+       regulator_disable(st->reg);
+}
+
+static int adm1177_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct device *hwmon_dev;
+       struct adm1177_state *st;
+       u32 alert_threshold_ua;
+       int ret;
+
+       st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+       if (!st)
+               return -ENOMEM;
+
+       st->client = client;
+
+       st->reg = devm_regulator_get_optional(&client->dev, "vref");
+       if (IS_ERR(st->reg)) {
+               if (PTR_ERR(st->reg) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+
+               st->reg = NULL;
+       } else {
+               ret = regulator_enable(st->reg);
+               if (ret)
+                       return ret;
+               ret = devm_add_action_or_reset(&client->dev, adm1177_remove,
+                                              st);
+               if (ret)
+                       return ret;
+       }
+
+       if (device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+                                    &st->r_sense_uohm))
+               st->r_sense_uohm = 0;
+       if (device_property_read_u32(dev, "adi,shutdown-threshold-microamp",
+                                    &alert_threshold_ua)) {
+               if (st->r_sense_uohm)
+                       /*
+                        * set maximum default value from datasheet based on
+                        * shunt-resistor
+                        */
+                       alert_threshold_ua = div_u64(105840000000,
+                                                    st->r_sense_uohm);
+               else
+                       alert_threshold_ua = 0;
+       }
+       st->vrange_high = device_property_read_bool(dev,
+                                                   "adi,vrange-high-enable");
+       if (alert_threshold_ua && st->r_sense_uohm)
+               adm1177_write_alert_thr(st, alert_threshold_ua);
+
+       ret = adm1177_write_cmd(st, ADM1177_CMD_V_CONT |
+                                   ADM1177_CMD_I_CONT |
+                                   (st->vrange_high ? 0 : ADM1177_CMD_VRANGE));
+       if (ret)
+               return ret;
+
+       hwmon_dev =
+               devm_hwmon_device_register_with_info(dev, client->name, st,
+                                                    &adm1177_chip_info, NULL);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id adm1177_id[] = {
+       {"adm1177", 0},
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, adm1177_id);
+
+static const struct of_device_id adm1177_dt_ids[] = {
+       { .compatible = "adi,adm1177" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, adm1177_dt_ids);
+
+static struct i2c_driver adm1177_driver = {
+       .class = I2C_CLASS_HWMON,
+       .driver = {
+               .name = "adm1177",
+               .of_match_table = adm1177_dt_ids,
+       },
+       .probe = adm1177_probe,
+       .id_table = adm1177_id,
+};
+module_i2c_driver(adm1177_driver);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADM1177 ADC driver");
+MODULE_LICENSE("GPL v2");
index 6c64d50..01c2eeb 100644 (file)
@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
        long reg;
 
        if (bypass_attn & (1 << channel))
-               reg = (volt * 1024) / 2250;
+               reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
        else
-               reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
+               reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
+                                       (r[0] + r[1]) * 2250);
        return clamp_val(reg, 0, 1023) & (0xff << 2);
 }
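
The change above swaps truncating division for rounding; a small stand-alone illustration with a simplified DIV_ROUND_CLOSEST (the kernel macro also handles negative operands):

#include <stdio.h>

/* Simplified DIV_ROUND_CLOSEST for non-negative operands. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        long volt = 1500;       /* made-up input, millivolts */

        printf("truncated: %ld\n", (volt * 1024) / 2250);                 /* 682 */
        printf("rounded:   %ld\n", DIV_ROUND_CLOSEST(volt * 1024, 2250)); /* 683 */
        return 0;
}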
 
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
new file mode 100644 (file)
index 0000000..370d0c7
--- /dev/null
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hwmon client for disk and solid state drives with temperature sensors
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ *
+ * With input from:
+ *    Hwmon client for S.M.A.R.T. hard disk drives with temperature sensors.
+ *    (C) 2018 Linus Walleij
+ *
+ *    hwmon: Driver for SCSI/ATA temperature sensors
+ *    by Constantin Baranov <const@mimas.ru>, submitted September 2009
+ *
+ * This driver supports reporting the temperature of SATA drives. It can be
+ * easily extended to report the temperature of SCSI drives.
+ *
+ * The primary means to read drive temperatures and temperature limits
+ * for ATA drives is the SCT Command Transport feature set as specified in
+ * ATA8-ACS.
+ * It can be used to read the current drive temperature, temperature limits,
+ * and historic minimum and maximum temperatures. The SCT Command Transport
+ * feature set is documented in "AT Attachment 8 - ATA/ATAPI Command Set
+ * (ATA8-ACS)".
+ *
+ * If the SCT Command Transport feature set is not available, drive temperatures
+ * may be readable through SMART attributes. Since SMART attributes are not well
+ * defined, this method is only used as a fallback mechanism.
+ *
+ * There are three SMART attributes which may report drive temperatures.
+ * Those are defined as follows (from
+ * http://www.cropel.com/library/smart-attribute-list.aspx).
+ *
+ * 190 Temperature     Temperature, monitored by a sensor somewhere inside
+ *                     the drive. Raw value typically holds the actual
+ *                     temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 194 Temperature     Temperature, monitored by a sensor somewhere inside
+ *                     the drive. Raw value typically holds the actual
+ *                     temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 231 Temperature     Temperature, monitored by a sensor somewhere inside
+ *                     the drive. Raw value typically holds the actual
+ *                     temperature (hexadecimal) in its rightmost two digits.
+ *
+ * Wikipedia defines attributes a bit differently.
+ *
+ * 190 Temperature     Value is equal to (100-temp. °C), allowing manufacturer
+ *     Difference or   to set a minimum threshold which corresponds to a
+ *     Airflow         maximum temperature. This also follows the convention of
+ *     Temperature     100 being a best-case value and lower values being
+ *                     undesirable. However, some older drives may instead
+ *                     report raw Temperature (identical to 0xC2) or
+ *                     Temperature minus 50 here.
+ * 194 Temperature or  Indicates the device temperature, if the appropriate
+ *     Temperature     sensor is fitted. Lowest byte of the raw value contains
+ *     Celsius         the exact temperature value (Celsius degrees).
+ * 231 Life Left       Indicates the approximate SSD life left, in terms of
+ *     (SSDs) or       program/erase cycles or available reserved blocks.
+ *     Temperature     A normalized value of 100 represents a new drive, with
+ *                     a threshold value at 10 indicating a need for
+ *                     replacement. A value of 0 may mean that the drive is
+ *                     operating in read-only mode to allow data recovery.
+ *                     Previously (pre-2010) occasionally used for Drive
+ *                     Temperature (more typically reported at 0xC2).
+ *
+ * Common denominator is that the first raw byte reports the temperature
+ * in degrees C on almost all drives. Some drives may report a fractional
+ * temperature in the second raw byte.
+ *
+ * Known exceptions (from libatasmart):
+ * - SAMSUNG SV0412H and SAMSUNG SV1204H report the temperature in tenths
+ *   of a degree C in the first two raw bytes.
+ * - A few Maxtor drives report an unknown or bad value in attribute 194.
+ * - Certain Apple SSD drives report an unknown value in attribute 190.
+ *   Only certain firmware versions are affected.
+ *
+ * Those exceptions affect older ATA drives and are currently ignored.
+ * Also, the second raw byte (possibly reporting the fractional temperature)
+ * is currently ignored.
+ *
+ * Many drives also report temperature limits in additional SMART data raw
+ * bytes. The format of those is not well defined and varies widely.
+ * The driver does not currently attempt to report those limits.
+ *
+ * According to data in smartmontools, attribute 231 is rarely used to report
+ * drive temperatures. At the same time, several drives report SSD life left
+ * in attribute 231, but do not support temperature sensors. For this reason,
+ * attribute 231 is currently ignored.
+ *
+ * Following the above definitions, temperatures are reported as follows:
+ * - If SCT Command Transport is supported, it is used to read the
+ *   temperature and, if available, temperature limits.
+ * - Otherwise, if SMART attribute 194 is supported, it is used to read
+ *   the temperature.
+ * - Otherwise, if SMART attribute 190 is supported, it is used to read
+ *   the temperature.
+ */
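
As a hedged illustration of the SMART fallback described above, this stand-alone sketch walks a value table the way the driver does (12-byte entries, attribute id in byte 2, temperature in raw byte 7, attribute 194 preferred over 190; the buffer contents here are fabricated):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ATA_SECT_SIZE           512
#define ATA_MAX_SMART_ATTRS     30
#define SMART_TEMP_PROP_190     190
#define SMART_TEMP_PROP_194     194

/* Returns the temperature in milli-degrees C, or -1 if no sensor found. */
static long smart_temp(const uint8_t *buf)
{
        long temp = -1;
        int i;

        for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) {
                const uint8_t *attr = buf + i * 12;

                if (attr[2] == SMART_TEMP_PROP_190)
                        temp = attr[7] * 1000L;         /* keep scanning for 194 */
                if (attr[2] == SMART_TEMP_PROP_194)
                        return attr[7] * 1000L;         /* 194 takes precedence */
        }
        return temp;
}

int main(void)
{
        uint8_t buf[ATA_SECT_SIZE];

        memset(buf, 0, sizeof(buf));
        buf[0 * 12 + 2] = SMART_TEMP_PROP_194;  /* fabricated entry */
        buf[0 * 12 + 7] = 38;                   /* 38 degrees C */
        printf("%ld milli-degC\n", smart_temp(buf));
        return 0;
}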
+
+#include <linux/ata.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_proto.h>
+
+struct drivetemp_data {
+       struct list_head list;          /* list of instantiated devices */
+       struct mutex lock;              /* protect data buffer accesses */
+       struct scsi_device *sdev;       /* SCSI device */
+       struct device *dev;             /* instantiating device */
+       struct device *hwdev;           /* hardware monitoring device */
+       u8 smartdata[ATA_SECT_SIZE];    /* local buffer */
+       int (*get_temp)(struct drivetemp_data *st, u32 attr, long *val);
+       bool have_temp_lowest;          /* lowest temp in SCT status */
+       bool have_temp_highest;         /* highest temp in SCT status */
+       bool have_temp_min;             /* have min temp */
+       bool have_temp_max;             /* have max temp */
+       bool have_temp_lcrit;           /* have lower critical limit */
+       bool have_temp_crit;            /* have critical limit */
+       int temp_min;                   /* min temp */
+       int temp_max;                   /* max temp */
+       int temp_lcrit;                 /* lower critical limit */
+       int temp_crit;                  /* critical limit */
+};
+
+static LIST_HEAD(drivetemp_devlist);
+
+#define ATA_MAX_SMART_ATTRS    30
+#define SMART_TEMP_PROP_190    190
+#define SMART_TEMP_PROP_194    194
+
+#define SCT_STATUS_REQ_ADDR    0xe0
+#define  SCT_STATUS_VERSION_LOW                0       /* log byte offsets */
+#define  SCT_STATUS_VERSION_HIGH       1
+#define  SCT_STATUS_TEMP               200
+#define  SCT_STATUS_TEMP_LOWEST                201
+#define  SCT_STATUS_TEMP_HIGHEST       202
+#define SCT_READ_LOG_ADDR      0xe1
+#define  SMART_READ_LOG                        0xd5
+#define  SMART_WRITE_LOG               0xd6
+
+#define INVALID_TEMP           0x80
+
+#define temp_is_valid(temp)    ((temp) != INVALID_TEMP)
+#define temp_from_sct(temp)    (((s8)(temp)) * 1000)
+
+static inline bool ata_id_smart_supported(u16 *id)
+{
+       return id[ATA_ID_COMMAND_SET_1] & BIT(0);
+}
+
+static inline bool ata_id_smart_enabled(u16 *id)
+{
+       return id[ATA_ID_CFS_ENABLE_1] & BIT(0);
+}
+
+static int drivetemp_scsi_command(struct drivetemp_data *st,
+                                u8 ata_command, u8 feature,
+                                u8 lba_low, u8 lba_mid, u8 lba_high)
+{
+       u8 scsi_cmd[MAX_COMMAND_SIZE];
+       int data_dir;
+
+       memset(scsi_cmd, 0, sizeof(scsi_cmd));
+       scsi_cmd[0] = ATA_16;
+       if (ata_command == ATA_CMD_SMART && feature == SMART_WRITE_LOG) {
+               scsi_cmd[1] = (5 << 1); /* PIO Data-out */
+               /*
+                * No off.line or cc, write to dev, block count in sector count
+                * field.
+                */
+               scsi_cmd[2] = 0x06;
+               data_dir = DMA_TO_DEVICE;
+       } else {
+               scsi_cmd[1] = (4 << 1); /* PIO Data-in */
+               /*
+                * No off.line or cc, read from dev, block count in sector count
+                * field.
+                */
+               scsi_cmd[2] = 0x0e;
+               data_dir = DMA_FROM_DEVICE;
+       }
+       scsi_cmd[4] = feature;
+       scsi_cmd[6] = 1;        /* 1 sector */
+       scsi_cmd[8] = lba_low;
+       scsi_cmd[10] = lba_mid;
+       scsi_cmd[12] = lba_high;
+       scsi_cmd[14] = ata_command;
+
+       return scsi_execute_req(st->sdev, scsi_cmd, data_dir,
+                               st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5,
+                               NULL);
+}
+
+static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
+                                u8 select)
+{
+       return drivetemp_scsi_command(st, ATA_CMD_SMART, feature, select,
+                                    ATA_SMART_LBAM_PASS, ATA_SMART_LBAH_PASS);
+}
+
+static int drivetemp_get_smarttemp(struct drivetemp_data *st, u32 attr,
+                                 long *temp)
+{
+       u8 *buf = st->smartdata;
+       bool have_temp = false;
+       u8 temp_raw;
+       u8 csum;
+       int err;
+       int i;
+
+       err = drivetemp_ata_command(st, ATA_SMART_READ_VALUES, 0);
+       if (err)
+               return err;
+
+       /* Checksum the read value table */
+       csum = 0;
+       for (i = 0; i < ATA_SECT_SIZE; i++)
+               csum += buf[i];
+       if (csum) {
+               dev_dbg(&st->sdev->sdev_gendev,
+                       "checksum error reading SMART values\n");
+               return -EIO;
+       }
+
+       for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) {
+               u8 *attr = buf + i * 12;
+               int id = attr[2];
+
+               if (!id)
+                       continue;
+
+               if (id == SMART_TEMP_PROP_190) {
+                       temp_raw = attr[7];
+                       have_temp = true;
+               }
+               if (id == SMART_TEMP_PROP_194) {
+                       temp_raw = attr[7];
+                       have_temp = true;
+                       break;
+               }
+       }
+
+       if (have_temp) {
+               *temp = temp_raw * 1000;
+               return 0;
+       }
+
+       return -ENXIO;
+}
+
+static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
+{
+       u8 *buf = st->smartdata;
+       int err;
+
+       err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+       if (err)
+               return err;
+       switch (attr) {
+       case hwmon_temp_input:
+               *val = temp_from_sct(buf[SCT_STATUS_TEMP]);
+               break;
+       case hwmon_temp_lowest:
+               *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]);
+               break;
+       case hwmon_temp_highest:
+               *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+static int drivetemp_identify_sata(struct drivetemp_data *st)
+{
+       struct scsi_device *sdev = st->sdev;
+       u8 *buf = st->smartdata;
+       struct scsi_vpd *vpd;
+       bool is_ata, is_sata;
+       bool have_sct_data_table;
+       bool have_sct_temp;
+       bool have_smart;
+       bool have_sct;
+       u16 *ata_id;
+       u16 version;
+       long temp;
+       int err;
+
+       /* SCSI-ATA Translation present? */
+       rcu_read_lock();
+       vpd = rcu_dereference(sdev->vpd_pg89);
+
+       /*
+        * Verify that ATA IDENTIFY DEVICE data is included in ATA Information
+        * VPD and that the drive implements the SATA protocol.
+        */
+       if (!vpd || vpd->len < 572 || vpd->data[56] != ATA_CMD_ID_ATA ||
+           vpd->data[36] != 0x34) {
+               rcu_read_unlock();
+               return -ENODEV;
+       }
+       ata_id = (u16 *)&vpd->data[60];
+       is_ata = ata_id_is_ata(ata_id);
+       is_sata = ata_id_is_sata(ata_id);
+       have_sct = ata_id_sct_supported(ata_id);
+       have_sct_data_table = ata_id_sct_data_tables(ata_id);
+       have_smart = ata_id_smart_supported(ata_id) &&
+                               ata_id_smart_enabled(ata_id);
+
+       rcu_read_unlock();
+
+       /* bail out if this is not a SATA device */
+       if (!is_ata || !is_sata)
+               return -ENODEV;
+       if (!have_sct)
+               goto skip_sct;
+
+       err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+       if (err)
+               goto skip_sct;
+
+       version = (buf[SCT_STATUS_VERSION_HIGH] << 8) |
+                 buf[SCT_STATUS_VERSION_LOW];
+       if (version != 2 && version != 3)
+               goto skip_sct;
+
+       have_sct_temp = temp_is_valid(buf[SCT_STATUS_TEMP]);
+       if (!have_sct_temp)
+               goto skip_sct;
+
+       st->have_temp_lowest = temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]);
+       st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]);
+
+       if (!have_sct_data_table)
+               goto skip_sct;
+
+       /* Request and read temperature history table */
+       memset(buf, '\0', sizeof(st->smartdata));
+       buf[0] = 5;     /* data table command */
+       buf[2] = 1;     /* read table */
+       buf[4] = 2;     /* temperature history table */
+
+       err = drivetemp_ata_command(st, SMART_WRITE_LOG, SCT_STATUS_REQ_ADDR);
+       if (err)
+               goto skip_sct_data;
+
+       err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_READ_LOG_ADDR);
+       if (err)
+               goto skip_sct_data;
+
+       /*
+        * Temperature limits per AT Attachment 8 -
+        * ATA/ATAPI Command Set (ATA8-ACS)
+        */
+       st->have_temp_max = temp_is_valid(buf[6]);
+       st->have_temp_crit = temp_is_valid(buf[7]);
+       st->have_temp_min = temp_is_valid(buf[8]);
+       st->have_temp_lcrit = temp_is_valid(buf[9]);
+
+       st->temp_max = temp_from_sct(buf[6]);
+       st->temp_crit = temp_from_sct(buf[7]);
+       st->temp_min = temp_from_sct(buf[8]);
+       st->temp_lcrit = temp_from_sct(buf[9]);
+
+skip_sct_data:
+       if (have_sct_temp) {
+               st->get_temp = drivetemp_get_scttemp;
+               return 0;
+       }
+skip_sct:
+       if (!have_smart)
+               return -ENODEV;
+       st->get_temp = drivetemp_get_smarttemp;
+       return drivetemp_get_smarttemp(st, hwmon_temp_input, &temp);
+}
+
+static int drivetemp_identify(struct drivetemp_data *st)
+{
+       struct scsi_device *sdev = st->sdev;
+
+       /* Bail out immediately if there is no inquiry data */
+       if (!sdev->inquiry || sdev->inquiry_len < 16)
+               return -ENODEV;
+
+       /* Disk device? */
+       if (sdev->type != TYPE_DISK && sdev->type != TYPE_ZBC)
+               return -ENODEV;
+
+       return drivetemp_identify_sata(st);
+}
+
+static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type,
+                        u32 attr, int channel, long *val)
+{
+       struct drivetemp_data *st = dev_get_drvdata(dev);
+       int err = 0;
+
+       if (type != hwmon_temp)
+               return -EINVAL;
+
+       switch (attr) {
+       case hwmon_temp_input:
+       case hwmon_temp_lowest:
+       case hwmon_temp_highest:
+               mutex_lock(&st->lock);
+               err = st->get_temp(st, attr, val);
+               mutex_unlock(&st->lock);
+               break;
+       case hwmon_temp_lcrit:
+               *val = st->temp_lcrit;
+               break;
+       case hwmon_temp_min:
+               *val = st->temp_min;
+               break;
+       case hwmon_temp_max:
+               *val = st->temp_max;
+               break;
+       case hwmon_temp_crit:
+               *val = st->temp_crit;
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+static umode_t drivetemp_is_visible(const void *data,
+                                  enum hwmon_sensor_types type,
+                                  u32 attr, int channel)
+{
+       const struct drivetemp_data *st = data;
+
+       switch (type) {
+       case hwmon_temp:
+               switch (attr) {
+               case hwmon_temp_input:
+                       return 0444;
+               case hwmon_temp_lowest:
+                       if (st->have_temp_lowest)
+                               return 0444;
+                       break;
+               case hwmon_temp_highest:
+                       if (st->have_temp_highest)
+                               return 0444;
+                       break;
+               case hwmon_temp_min:
+                       if (st->have_temp_min)
+                               return 0444;
+                       break;
+               case hwmon_temp_max:
+                       if (st->have_temp_max)
+                               return 0444;
+                       break;
+               case hwmon_temp_lcrit:
+                       if (st->have_temp_lcrit)
+                               return 0444;
+                       break;
+               case hwmon_temp_crit:
+                       if (st->have_temp_crit)
+                               return 0444;
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static const struct hwmon_channel_info *drivetemp_info[] = {
+       HWMON_CHANNEL_INFO(chip,
+                          HWMON_C_REGISTER_TZ),
+       HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT |
+                          HWMON_T_LOWEST | HWMON_T_HIGHEST |
+                          HWMON_T_MIN | HWMON_T_MAX |
+                          HWMON_T_LCRIT | HWMON_T_CRIT),
+       NULL
+};
+
+static const struct hwmon_ops drivetemp_ops = {
+       .is_visible = drivetemp_is_visible,
+       .read = drivetemp_read,
+};
+
+static const struct hwmon_chip_info drivetemp_chip_info = {
+       .ops = &drivetemp_ops,
+       .info = drivetemp_info,
+};
+
+/*
+ * The device argument points to sdev->sdev_dev. Its parent is
+ * sdev->sdev_gendev, which we can use to get the scsi_device pointer.
+ */
+static int drivetemp_add(struct device *dev, struct class_interface *intf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev->parent);
+       struct drivetemp_data *st;
+       int err;
+
+       st = kzalloc(sizeof(*st), GFP_KERNEL);
+       if (!st)
+               return -ENOMEM;
+
+       st->sdev = sdev;
+       st->dev = dev;
+       mutex_init(&st->lock);
+
+       if (drivetemp_identify(st)) {
+               err = -ENODEV;
+               goto abort;
+       }
+
+       st->hwdev = hwmon_device_register_with_info(dev->parent, "drivetemp",
+                                                   st, &drivetemp_chip_info,
+                                                   NULL);
+       if (IS_ERR(st->hwdev)) {
+               err = PTR_ERR(st->hwdev);
+               goto abort;
+       }
+
+       list_add(&st->list, &drivetemp_devlist);
+       return 0;
+
+abort:
+       kfree(st);
+       return err;
+}
+
+static void drivetemp_remove(struct device *dev, struct class_interface *intf)
+{
+       struct drivetemp_data *st, *tmp;
+
+       list_for_each_entry_safe(st, tmp, &drivetemp_devlist, list) {
+               if (st->dev == dev) {
+                       list_del(&st->list);
+                       hwmon_device_unregister(st->hwdev);
+                       kfree(st);
+                       break;
+               }
+       }
+}
+
+static struct class_interface drivetemp_interface = {
+       .add_dev = drivetemp_add,
+       .remove_dev = drivetemp_remove,
+};
+
+static int __init drivetemp_init(void)
+{
+       return scsi_register_interface(&drivetemp_interface);
+}
+
+static void __exit drivetemp_exit(void)
+{
+       scsi_unregister_interface(&drivetemp_interface);
+}
+
+module_init(drivetemp_init);
+module_exit(drivetemp_exit);
+
+MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>");
+MODULE_DESCRIPTION("Hard drive temperature monitor");
+MODULE_LICENSE("GPL");
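
Once a drive is recognized, readings surface through the standard hwmon sysfs nodes; a hedged user-space sketch (the hwmon index, and therefore the path, is assigned at runtime and varies per system):

#include <stdio.h>

int main(void)
{
        /* Path is illustrative; the hwmon index is assigned at runtime. */
        const char *node = "/sys/class/hwmon/hwmon0/temp1_input";
        FILE *f = fopen(node, "r");
        long mdegc;

        if (!f) {
                perror(node);
                return 1;
        }
        if (fscanf(f, "%ld", &mdegc) == 1)
                printf("drive temperature: %ld.%03ld degC\n",
                       mdegc / 1000, mdegc % 1000);
        fclose(f);
        return 0;
}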
index 1f3b30b..6a30fb4 100644 (file)
@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
 
 #define to_hwmon_attr(d) \
        container_of(d, struct hwmon_device_attribute, dev_attr)
+#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
 
 /*
  * Thermal zone information
@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
  * also provides the sensor index.
  */
 struct hwmon_thermal_data {
-       struct hwmon_device *hwdev;     /* Reference to hwmon device */
+       struct device *dev;             /* Reference to hwmon device */
        int index;                      /* sensor index */
 };
 
@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
        NULL
 };
 
+static void hwmon_free_attrs(struct attribute **attrs)
+{
+       int i;
+
+       for (i = 0; attrs[i]; i++) {
+               struct device_attribute *dattr = to_dev_attr(attrs[i]);
+               struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
+
+               kfree(hattr);
+       }
+       kfree(attrs);
+}
+
 static void hwmon_dev_release(struct device *dev)
 {
-       kfree(to_hwmon_device(dev));
+       struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+       if (hwdev->group.attrs)
+               hwmon_free_attrs(hwdev->group.attrs);
+       kfree(hwdev->groups);
+       kfree(hwdev);
 }
 
 static struct class hwmon_class = {
@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
 static int hwmon_thermal_get_temp(void *data, int *temp)
 {
        struct hwmon_thermal_data *tdata = data;
-       struct hwmon_device *hwdev = tdata->hwdev;
+       struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
        int ret;
        long t;
 
-       ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
+       ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
                                     tdata->index, &t);
        if (ret < 0)
                return ret;
@@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
        .get_temp = hwmon_thermal_get_temp,
 };
 
-static int hwmon_thermal_add_sensor(struct device *dev,
-                                   struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
 {
        struct hwmon_thermal_data *tdata;
        struct thermal_zone_device *tzd;
@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
        if (!tdata)
                return -ENOMEM;
 
-       tdata->hwdev = hwdev;
+       tdata->dev = dev;
        tdata->index = index;
 
-       tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+       tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
                                                   &hwmon_thermal_ops);
        /*
         * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
        return 0;
 }
 #else
-static int hwmon_thermal_add_sensor(struct device *dev,
-                                   struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
 {
        return 0;
 }
@@ -171,7 +188,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
 
 static int hwmon_attr_base(enum hwmon_sensor_types type)
 {
-       if (type == hwmon_in)
+       if (type == hwmon_in || type == hwmon_intrusion)
                return 0;
        return 1;
 }
@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
               (type == hwmon_fan && attr == hwmon_fan_label);
 }
 
-static struct attribute *hwmon_genattr(struct device *dev,
-                                      const void *drvdata,
+static struct attribute *hwmon_genattr(const void *drvdata,
                                       enum hwmon_sensor_types type,
                                       u32 attr,
                                       int index,
@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
        if ((mode & 0222) && !ops->write)
                return ERR_PTR(-EINVAL);
 
-       hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+       hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
        if (!hattr)
                return ERR_PTR(-ENOMEM);
 
@@ -327,6 +343,7 @@ static const char * const hwmon_chip_attrs[] = {
 };
 
 static const char * const hwmon_temp_attr_templates[] = {
+       [hwmon_temp_enable] = "temp%d_enable",
        [hwmon_temp_input] = "temp%d_input",
        [hwmon_temp_type] = "temp%d_type",
        [hwmon_temp_lcrit] = "temp%d_lcrit",
@@ -354,6 +371,7 @@ static const char * const hwmon_temp_attr_templates[] = {
 };
 
 static const char * const hwmon_in_attr_templates[] = {
+       [hwmon_in_enable] = "in%d_enable",
        [hwmon_in_input] = "in%d_input",
        [hwmon_in_min] = "in%d_min",
        [hwmon_in_max] = "in%d_max",
@@ -369,10 +387,10 @@ static const char * const hwmon_in_attr_templates[] = {
        [hwmon_in_max_alarm] = "in%d_max_alarm",
        [hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm",
        [hwmon_in_crit_alarm] = "in%d_crit_alarm",
-       [hwmon_in_enable] = "in%d_enable",
 };
 
 static const char * const hwmon_curr_attr_templates[] = {
+       [hwmon_curr_enable] = "curr%d_enable",
        [hwmon_curr_input] = "curr%d_input",
        [hwmon_curr_min] = "curr%d_min",
        [hwmon_curr_max] = "curr%d_max",
@@ -391,6 +409,7 @@ static const char * const hwmon_curr_attr_templates[] = {
 };
 
 static const char * const hwmon_power_attr_templates[] = {
+       [hwmon_power_enable] = "power%d_enable",
        [hwmon_power_average] = "power%d_average",
        [hwmon_power_average_interval] = "power%d_average_interval",
        [hwmon_power_average_interval_max] = "power%d_interval_max",
@@ -422,11 +441,13 @@ static const char * const hwmon_power_attr_templates[] = {
 };
 
 static const char * const hwmon_energy_attr_templates[] = {
+       [hwmon_energy_enable] = "energy%d_enable",
        [hwmon_energy_input] = "energy%d_input",
        [hwmon_energy_label] = "energy%d_label",
 };
 
 static const char * const hwmon_humidity_attr_templates[] = {
+       [hwmon_humidity_enable] = "humidity%d_enable",
        [hwmon_humidity_input] = "humidity%d_input",
        [hwmon_humidity_label] = "humidity%d_label",
        [hwmon_humidity_min] = "humidity%d_min",
@@ -438,6 +459,7 @@ static const char * const hwmon_humidity_attr_templates[] = {
 };
 
 static const char * const hwmon_fan_attr_templates[] = {
+       [hwmon_fan_enable] = "fan%d_enable",
        [hwmon_fan_input] = "fan%d_input",
        [hwmon_fan_label] = "fan%d_label",
        [hwmon_fan_min] = "fan%d_min",
@@ -458,6 +480,11 @@ static const char * const hwmon_pwm_attr_templates[] = {
        [hwmon_pwm_freq] = "pwm%d_freq",
 };
 
+static const char * const hwmon_intrusion_attr_templates[] = {
+       [hwmon_intrusion_alarm] = "intrusion%d_alarm",
+       [hwmon_intrusion_beep]  = "intrusion%d_beep",
+};
+
 static const char * const *__templates[] = {
        [hwmon_chip] = hwmon_chip_attrs,
        [hwmon_temp] = hwmon_temp_attr_templates,
@@ -468,6 +495,7 @@ static const char * const *__templates[] = {
        [hwmon_humidity] = hwmon_humidity_attr_templates,
        [hwmon_fan] = hwmon_fan_attr_templates,
        [hwmon_pwm] = hwmon_pwm_attr_templates,
+       [hwmon_intrusion] = hwmon_intrusion_attr_templates,
 };
 
 static const int __templates_size[] = {
@@ -480,6 +508,7 @@ static const int __templates_size[] = {
        [hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates),
        [hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates),
        [hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates),
+       [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates),
 };
 
 static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
@@ -492,8 +521,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
        return n;
 }
 
-static int hwmon_genattrs(struct device *dev,
-                         const void *drvdata,
+static int hwmon_genattrs(const void *drvdata,
                          struct attribute **attrs,
                          const struct hwmon_ops *ops,
                          const struct hwmon_channel_info *info)
@@ -519,7 +547,7 @@ static int hwmon_genattrs(struct device *dev,
                        attr_mask &= ~BIT(attr);
                        if (attr >= template_size)
                                return -EINVAL;
-                       a = hwmon_genattr(dev, drvdata, info->type, attr, i,
+                       a = hwmon_genattr(drvdata, info->type, attr, i,
                                          templates[attr], ops);
                        if (IS_ERR(a)) {
                                if (PTR_ERR(a) != -ENOENT)
@@ -533,8 +561,7 @@ static int hwmon_genattrs(struct device *dev,
 }
 
 static struct attribute **
-__hwmon_create_attrs(struct device *dev, const void *drvdata,
-                    const struct hwmon_chip_info *chip)
+__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
 {
        int ret, i, aindex = 0, nattrs = 0;
        struct attribute **attrs;
@@ -545,15 +572,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
        if (nattrs == 0)
                return ERR_PTR(-EINVAL);
 
-       attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+       attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
        if (!attrs)
                return ERR_PTR(-ENOMEM);
 
        for (i = 0; chip->info[i]; i++) {
-               ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
+               ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
                                     chip->info[i]);
-               if (ret < 0)
+               if (ret < 0) {
+                       hwmon_free_attrs(attrs);
                        return ERR_PTR(ret);
+               }
                aindex += ret;
        }
 
@@ -595,14 +624,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
                        for (i = 0; groups[i]; i++)
                                ngroups++;
 
-               hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
-                                            GFP_KERNEL);
+               hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
                if (!hwdev->groups) {
                        err = -ENOMEM;
                        goto free_hwmon;
                }
 
-               attrs = __hwmon_create_attrs(dev, drvdata, chip);
+               attrs = __hwmon_create_attrs(drvdata, chip);
                if (IS_ERR(attrs)) {
                        err = PTR_ERR(attrs);
                        goto free_hwmon;
@@ -647,8 +675,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
                                                           hwmon_temp_input, j))
                                        continue;
                                if (info[i]->config[j] & HWMON_T_INPUT) {
-                                       err = hwmon_thermal_add_sensor(dev,
-                                                               hwdev, j);
+                                       err = hwmon_thermal_add_sensor(hdev, j);
                                        if (err) {
                                                device_unregister(hdev);
                                                /*
@@ -667,7 +694,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
        return hdev;
 
 free_hwmon:
-       kfree(hwdev);
+       hwmon_dev_release(hdev);
 ida_remove:
        ida_simple_remove(&hwmon_ida, id);
        return ERR_PTR(err);
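The core change above stops using devm_* for attribute memory: devm allocations are tied to the parent device and can be released while the hwmon class device, which has its own lifetime, still references them. A minimal sketch of the ownership pattern now used, with hypothetical names (my_device, my_release):

	#include <linux/device.h>
	#include <linux/slab.h>

	struct my_device {
		struct device dev;
		struct attribute **attrs;	/* kcalloc'ed at create time */
	};

	static void my_release(struct device *dev)
	{
		struct my_device *md = container_of(dev, struct my_device, dev);

		/* safe to free: release runs after the last reference drops */
		kfree(md->attrs);
		kfree(md);
	}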
index 5c1dddd..e39354f 100644 (file)
@@ -1,13 +1,29 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h
+ *             processor hardware monitoring
  *
  * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
+ * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
+ *
+ * Implementation notes:
+ * - CCD register address information as well as the calculation to
+ *   convert raw register values is from https://github.com/ocerman/zenpower.
+ *   The information is not confirmed from chip datasheets, but experiments
+ *   suggest that it provides reasonable temperature values.
+ * - Register addresses to read chip voltage and current are also from
+ *   https://github.com/ocerman/zenpower, and not confirmed from chip
+ *   datasheets. Current calibration is board specific and not typically
+ *   shared by board vendors. For this reason, current values are
+ *   normalized to report 1A/LSB for core current and 0.25A/LSB for SoC
+ *   current. Reported values can be adjusted using the sensors configuration
+ *   file.
  */
 
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
 #include <linux/err.h>
 #include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -31,22 +47,22 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
 #endif
 
 /* CPUID function 0x80000001, ebx */
-#define CPUID_PKGTYPE_MASK     0xf0000000
+#define CPUID_PKGTYPE_MASK     GENMASK(31, 28)
 #define CPUID_PKGTYPE_F                0x00000000
 #define CPUID_PKGTYPE_AM2R2_AM3        0x10000000
 
 /* DRAM controller (PCI function 2) */
 #define REG_DCT0_CONFIG_HIGH           0x094
-#define  DDR3_MODE                     0x00000100
+#define  DDR3_MODE                     BIT(8)
 
 /* miscellaneous (PCI function 3) */
 #define REG_HARDWARE_THERMAL_CONTROL   0x64
-#define  HTC_ENABLE                    0x00000001
+#define  HTC_ENABLE                    BIT(0)
 
 #define REG_REPORTED_TEMPERATURE       0xa4
 
 #define REG_NORTHBRIDGE_CAPABILITIES   0xe8
-#define  NB_CAP_HTC                    0x00000400
+#define  NB_CAP_HTC                    BIT(10)
 
 /*
  * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
@@ -60,6 +76,20 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
+/* F17h M01h Access through SMN */
 #define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET    0x00059800
 
+#define F17H_M70H_CCD_TEMP(x)                  (0x00059954 + ((x) * 4))
+#define F17H_M70H_CCD_TEMP_VALID               BIT(11)
+#define F17H_M70H_CCD_TEMP_MASK                        GENMASK(10, 0)
+
+#define F17H_M01H_SVI                          0x0005A000
+#define F17H_M01H_SVI_TEL_PLANE0               (F17H_M01H_SVI + 0xc)
+#define F17H_M01H_SVI_TEL_PLANE1               (F17H_M01H_SVI + 0x10)
+
+#define CUR_TEMP_SHIFT                         21
+#define CUR_TEMP_RANGE_SEL_MASK                        BIT(19)
+
+#define CFACTOR_ICORE                          1000000 /* 1A / LSB     */
+#define CFACTOR_ISOC                           250000  /* 0.25A / LSB  */
+
 struct k10temp_data {
        struct pci_dev *pdev;
        void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
@@ -67,6 +97,10 @@ struct k10temp_data {
        int temp_offset;
        u32 temp_adjust_mask;
        bool show_tdie;
+       u32 show_tccd;
+       u32 svi_addr[2];
+       bool show_current;
+       int cfactor[2];
 };
 
 struct tctl_offset {
@@ -84,6 +118,16 @@ static const struct tctl_offset tctl_offset_table[] = {
        { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
 };
 
+static bool is_threadripper(void)
+{
+       return strstr(boot_cpu_data.x86_model_id, "Threadripper");
+}
+
+static bool is_epyc(void)
+{
+       return strstr(boot_cpu_data.x86_model_id, "EPYC");
+}
+
 static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
 {
        pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
@@ -123,130 +167,237 @@ static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
                     F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
 }
 
-static unsigned int get_raw_temp(struct k10temp_data *data)
+static long get_raw_temp(struct k10temp_data *data)
 {
-       unsigned int temp;
        u32 regval;
+       long temp;
 
        data->read_tempreg(data->pdev, &regval);
-       temp = (regval >> 21) * 125;
+       temp = (regval >> CUR_TEMP_SHIFT) * 125;
        if (regval & data->temp_adjust_mask)
                temp -= 49000;
        return temp;
 }
 
-static ssize_t temp1_input_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct k10temp_data *data = dev_get_drvdata(dev);
-       unsigned int temp = get_raw_temp(data);
+static const char *k10temp_temp_label[] = {
+       "Tdie",
+       "Tctl",
+       "Tccd1",
+       "Tccd2",
+       "Tccd3",
+       "Tccd4",
+       "Tccd5",
+       "Tccd6",
+       "Tccd7",
+       "Tccd8",
+};
 
-       if (temp > data->temp_offset)
-               temp -= data->temp_offset;
-       else
-               temp = 0;
+static const char *k10temp_in_label[] = {
+       "Vcore",
+       "Vsoc",
+};
 
-       return sprintf(buf, "%u\n", temp);
-}
+static const char *k10temp_curr_label[] = {
+       "Icore",
+       "Isoc",
+};
 
-static ssize_t temp2_input_show(struct device *dev,
-                               struct device_attribute *devattr, char *buf)
+static int k10temp_read_labels(struct device *dev,
+                              enum hwmon_sensor_types type,
+                              u32 attr, int channel, const char **str)
 {
-       struct k10temp_data *data = dev_get_drvdata(dev);
-       unsigned int temp = get_raw_temp(data);
-
-       return sprintf(buf, "%u\n", temp);
+       switch (type) {
+       case hwmon_temp:
+               *str = k10temp_temp_label[channel];
+               break;
+       case hwmon_in:
+               *str = k10temp_in_label[channel];
+               break;
+       case hwmon_curr:
+               *str = k10temp_curr_label[channel];
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
 }
 
-static ssize_t temp_label_show(struct device *dev,
-                              struct device_attribute *devattr, char *buf)
+static int k10temp_read_curr(struct device *dev, u32 attr, int channel,
+                            long *val)
 {
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct k10temp_data *data = dev_get_drvdata(dev);
+       u32 regval;
 
-       return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie");
+       switch (attr) {
+       case hwmon_curr_input:
+               amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+                            data->svi_addr[channel], &regval);
+               *val = DIV_ROUND_CLOSEST(data->cfactor[channel] *
+                                        (regval & 0xff),
+                                        1000);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
 }
 
-static ssize_t temp1_max_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val)
 {
-       return sprintf(buf, "%d\n", 70 * 1000);
+       struct k10temp_data *data = dev_get_drvdata(dev);
+       u32 regval;
+
+       switch (attr) {
+       case hwmon_in_input:
+               amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+                            data->svi_addr[channel], &regval);
+               regval = (regval >> 16) & 0xff;
+               *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
 }
 
-static ssize_t temp_crit_show(struct device *dev,
-                             struct device_attribute *devattr, char *buf)
+static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+                            long *val)
 {
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct k10temp_data *data = dev_get_drvdata(dev);
-       int show_hyst = attr->index;
        u32 regval;
-       int value;
 
-       data->read_htcreg(data->pdev, &regval);
-       value = ((regval >> 16) & 0x7f) * 500 + 52000;
-       if (show_hyst)
-               value -= ((regval >> 24) & 0xf) * 500;
-       return sprintf(buf, "%d\n", value);
+       switch (attr) {
+       case hwmon_temp_input:
+               switch (channel) {
+               case 0:         /* Tdie */
+                       *val = get_raw_temp(data) - data->temp_offset;
+                       if (*val < 0)
+                               *val = 0;
+                       break;
+               case 1:         /* Tctl */
+                       *val = get_raw_temp(data);
+                       if (*val < 0)
+                               *val = 0;
+                       break;
+               case 2 ... 9:           /* Tccd{1-8} */
+                       amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+                                    F17H_M70H_CCD_TEMP(channel - 2), &regval);
+                       *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000;
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+       case hwmon_temp_max:
+               *val = 70 * 1000;
+               break;
+       case hwmon_temp_crit:
+               data->read_htcreg(data->pdev, &regval);
+               *val = ((regval >> 16) & 0x7f) * 500 + 52000;
+               break;
+       case hwmon_temp_crit_hyst:
+               data->read_htcreg(data->pdev, &regval);
+               *val = (((regval >> 16) & 0x7f)
+                       - ((regval >> 24) & 0xf)) * 500 + 52000;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
 }
 
-static DEVICE_ATTR_RO(temp1_input);
-static DEVICE_ATTR_RO(temp1_max);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0);
-static DEVICE_ATTR_RO(temp2_input);
-static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1);
+static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
+                       u32 attr, int channel, long *val)
+{
+       switch (type) {
+       case hwmon_temp:
+               return k10temp_read_temp(dev, attr, channel, val);
+       case hwmon_in:
+               return k10temp_read_in(dev, attr, channel, val);
+       case hwmon_curr:
+               return k10temp_read_curr(dev, attr, channel, val);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
 
-static umode_t k10temp_is_visible(struct kobject *kobj,
-                                 struct attribute *attr, int index)
+static umode_t k10temp_is_visible(const void *_data,
+                                 enum hwmon_sensor_types type,
+                                 u32 attr, int channel)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct k10temp_data *data = dev_get_drvdata(dev);
+       const struct k10temp_data *data = _data;
        struct pci_dev *pdev = data->pdev;
        u32 reg;
 
-       switch (index) {
-       case 0 ... 1:   /* temp1_input, temp1_max */
-       default:
-               break;
-       case 2 ... 3:   /* temp1_crit, temp1_crit_hyst */
-               if (!data->read_htcreg)
-                       return 0;
-
-               pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
-                                     &reg);
-               if (!(reg & NB_CAP_HTC))
-                       return 0;
-
-               data->read_htcreg(data->pdev, &reg);
-               if (!(reg & HTC_ENABLE))
+       switch (type) {
+       case hwmon_temp:
+               switch (attr) {
+               case hwmon_temp_input:
+                       switch (channel) {
+                       case 0:         /* Tdie, or Tctl if we don't show it */
+                               break;
+                       case 1:         /* Tctl */
+                               if (!data->show_tdie)
+                                       return 0;
+                               break;
+                       case 2 ... 9:           /* Tccd{1-8} */
+                               if (!(data->show_tccd & BIT(channel - 2)))
+                                       return 0;
+                               break;
+                       default:
+                               return 0;
+                       }
+                       break;
+               case hwmon_temp_max:
+                       if (channel || data->show_tdie)
+                               return 0;
+                       break;
+               case hwmon_temp_crit:
+               case hwmon_temp_crit_hyst:
+                       if (channel || !data->read_htcreg)
+                               return 0;
+
+                       pci_read_config_dword(pdev,
+                                             REG_NORTHBRIDGE_CAPABILITIES,
+                                             &reg);
+                       if (!(reg & NB_CAP_HTC))
+                               return 0;
+
+                       data->read_htcreg(data->pdev, &reg);
+                       if (!(reg & HTC_ENABLE))
+                               return 0;
+                       break;
+               case hwmon_temp_label:
+                       /* No labels if we don't show the die temperature */
+                       if (!data->show_tdie)
+                               return 0;
+                       switch (channel) {
+                       case 0:         /* Tdie */
+                       case 1:         /* Tctl */
+                               break;
+                       case 2 ... 9:           /* Tccd{1-8} */
+                               if (!(data->show_tccd & BIT(channel - 2)))
+                                       return 0;
+                               break;
+                       default:
+                               return 0;
+                       }
+                       break;
+               default:
                        return 0;
+               }
                break;
-       case 4 ... 6:   /* temp1_label, temp2_input, temp2_label */
-               if (!data->show_tdie)
+       case hwmon_in:
+       case hwmon_curr:
+               if (!data->show_current)
                        return 0;
                break;
+       default:
+               return 0;
        }
-       return attr->mode;
+       return 0444;
 }
 
-static struct attribute *k10temp_attrs[] = {
-       &dev_attr_temp1_input.attr,
-       &dev_attr_temp1_max.attr,
-       &sensor_dev_attr_temp1_crit.dev_attr.attr,
-       &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
-       &sensor_dev_attr_temp1_label.dev_attr.attr,
-       &dev_attr_temp2_input.attr,
-       &sensor_dev_attr_temp2_label.dev_attr.attr,
-       NULL
-};
-
-static const struct attribute_group k10temp_group = {
-       .attrs = k10temp_attrs,
-       .is_visible = k10temp_is_visible,
-};
-__ATTRIBUTE_GROUPS(k10temp);
-
 static bool has_erratum_319(struct pci_dev *pdev)
 {
        u32 pkg_type, reg_dram_cfg;
@@ -281,8 +432,125 @@ static bool has_erratum_319(struct pci_dev *pdev)
               (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
 }
 
-static int k10temp_probe(struct pci_dev *pdev,
-                                  const struct pci_device_id *id)
+#ifdef CONFIG_DEBUG_FS
+
+static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev,
+                                 u32 addr, int count)
+{
+       u32 reg;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               if (!(i & 3))
+                       seq_printf(s, "0x%06x: ", addr + i * 4);
+               amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg);
+               seq_printf(s, "%08x ", reg);
+               if ((i & 3) == 3)
+                       seq_puts(s, "\n");
+       }
+}
+
+static int svi_show(struct seq_file *s, void *unused)
+{
+       struct k10temp_data *data = s->private;
+
+       k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32);
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(svi);
+
+static int thm_show(struct seq_file *s, void *unused)
+{
+       struct k10temp_data *data = s->private;
+
+       k10temp_smn_regs_show(s, data->pdev,
+                             F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256);
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(thm);
+
+static void k10temp_debugfs_cleanup(void *ddir)
+{
+       debugfs_remove_recursive(ddir);
+}
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+       struct dentry *debugfs;
+       char name[32];
+
+       /* Only show debugfs data for Family 17h/18h CPUs */
+       if (!data->show_tdie)
+               return;
+
+       scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev));
+
+       debugfs = debugfs_create_dir(name, NULL);
+       if (debugfs) {
+               debugfs_create_file("svi", 0444, debugfs, data, &svi_fops);
+               debugfs_create_file("thm", 0444, debugfs, data, &thm_fops);
+               devm_add_action_or_reset(&data->pdev->dev,
+                                        k10temp_debugfs_cleanup, debugfs);
+       }
+}
+
+#else
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+}
+
+#endif
+
+static const struct hwmon_channel_info *k10temp_info[] = {
+       HWMON_CHANNEL_INFO(temp,
+                          HWMON_T_INPUT | HWMON_T_MAX |
+                          HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+                          HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL),
+       HWMON_CHANNEL_INFO(in,
+                          HWMON_I_INPUT | HWMON_I_LABEL,
+                          HWMON_I_INPUT | HWMON_I_LABEL),
+       HWMON_CHANNEL_INFO(curr,
+                          HWMON_C_INPUT | HWMON_C_LABEL,
+                          HWMON_C_INPUT | HWMON_C_LABEL),
+       NULL
+};
+
+static const struct hwmon_ops k10temp_hwmon_ops = {
+       .is_visible = k10temp_is_visible,
+       .read = k10temp_read,
+       .read_string = k10temp_read_labels,
+};
+
+static const struct hwmon_chip_info k10temp_chip_info = {
+       .ops = &k10temp_hwmon_ops,
+       .info = k10temp_info,
+};
+
+static void k10temp_get_ccd_support(struct pci_dev *pdev,
+                                   struct k10temp_data *data, int limit)
+{
+       u32 regval;
+       int i;
+
+       for (i = 0; i < limit; i++) {
+               amd_smn_read(amd_pci_dev_to_node_id(pdev),
+                            F17H_M70H_CCD_TEMP(i), &regval);
+               if (regval & F17H_M70H_CCD_TEMP_VALID)
+                       data->show_tccd |= BIT(i);
+       }
+}
+
+static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int unreliable = has_erratum_319(pdev);
        struct device *dev = &pdev->dev;
@@ -312,9 +580,32 @@ static int k10temp_probe(struct pci_dev *pdev,
                data->read_htcreg = read_htcreg_nb_f15;
                data->read_tempreg = read_tempreg_nb_f15;
        } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
-               data->temp_adjust_mask = 0x80000;
+               data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK;
                data->read_tempreg = read_tempreg_nb_f17;
                data->show_tdie = true;
+
+               switch (boot_cpu_data.x86_model) {
+               case 0x1:       /* Zen */
+               case 0x8:       /* Zen+ */
+               case 0x11:      /* Zen APU */
+               case 0x18:      /* Zen+ APU */
+                       data->show_current = !is_threadripper() && !is_epyc();
+                       data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0;
+                       data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1;
+                       data->cfactor[0] = CFACTOR_ICORE;
+                       data->cfactor[1] = CFACTOR_ISOC;
+                       k10temp_get_ccd_support(pdev, data, 4);
+                       break;
+               case 0x31:      /* Zen2 Threadripper */
+               case 0x71:      /* Zen2 */
+                       data->show_current = !is_threadripper() && !is_epyc();
+                       data->cfactor[0] = CFACTOR_ICORE;
+                       data->cfactor[1] = CFACTOR_ISOC;
+                       data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1;
+                       data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0;
+                       k10temp_get_ccd_support(pdev, data, 8);
+                       break;
+               }
        } else {
                data->read_htcreg = read_htcreg_pci;
                data->read_tempreg = read_tempreg_pci;
@@ -330,9 +621,15 @@ static int k10temp_probe(struct pci_dev *pdev,
                }
        }
 
-       hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
-                                                          k10temp_groups);
-       return PTR_ERR_OR_ZERO(hwmon_dev);
+       hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
+                                                        &k10temp_chip_info,
+                                                        NULL);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
+
+       k10temp_init_debugfs(data);
+
+       return 0;
 }
 
 static const struct pci_device_id k10temp_id_table[] = {
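Worked examples of the conversions introduced above, using demonstration register values (the formulas mirror k10temp_read_temp() and k10temp_read_in(); the input values themselves are invented):

	#include <stdio.h>

	int main(void)
	{
		unsigned int tempreg = 0x30a00000; /* hypothetical THM register */
		unsigned int ccd     = 0x332;      /* hypothetical CCD reading  */
		unsigned int vid     = 0x40;       /* hypothetical 8-bit VID    */

		/* bits 31:21 hold the temperature in 0.125 degC steps */
		printf("Tctl:  %u mdegC\n", (tempreg >> 21) * 125);
		/* CCD values are 11 bits wide with a fixed -49 degC offset */
		printf("Tccd:  %d mdegC\n", (int)((ccd & 0x7ff) * 125) - 49000);
		/* SVI telemetry: 1.55 V minus 6.25 mV per VID step, rounded to mV */
		printf("Vcore: %u mV\n", (155000 - vid * 625 + 50) / 100);
		return 0;
	}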
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
new file mode 100644 (file)
index 0000000..eb22a34
--- /dev/null
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX31730 3-Channel Remote Temperature Sensor
+ *
+ * Copyright (c) 2019 Guenter Roeck <linux@roeck-us.net>
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+/* Addresses scanned */
+static const unsigned short normal_i2c[] = { 0x1c, 0x1d, 0x1e, 0x1f, 0x4c,
+                                            0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
+
+/* The MAX31730 registers */
+#define MAX31730_REG_TEMP              0x00
+#define MAX31730_REG_CONF              0x13
+#define  MAX31730_STOP                 BIT(7)
+#define  MAX31730_EXTRANGE             BIT(1)
+#define MAX31730_REG_TEMP_OFFSET       0x16
+#define  MAX31730_TEMP_OFFSET_BASELINE 0x77
+#define MAX31730_REG_OFFSET_ENABLE     0x17
+#define MAX31730_REG_TEMP_MAX          0x20
+#define MAX31730_REG_TEMP_MIN          0x30
+#define MAX31730_REG_STATUS_HIGH       0x32
+#define MAX31730_REG_STATUS_LOW                0x33
+#define MAX31730_REG_CHANNEL_ENABLE    0x35
+#define MAX31730_REG_TEMP_FAULT                0x36
+
+#define MAX31730_REG_MFG_ID            0x50
+#define  MAX31730_MFG_ID               0x4d
+#define MAX31730_REG_MFG_REV           0x51
+#define  MAX31730_MFG_REV              0x01
+
+#define MAX31730_TEMP_MIN              (-128000)
+#define MAX31730_TEMP_MAX              127937
+
+/* Each client has this additional data */
+struct max31730_data {
+       struct i2c_client       *client;
+       u8                      orig_conf;
+       u8                      current_conf;
+       u8                      offset_enable;
+       u8                      channel_enable;
+};
+
+/*-----------------------------------------------------------------------*/
+
+static inline long max31730_reg_to_mc(s16 temp)
+{
+       return DIV_ROUND_CLOSEST((temp >> 4) * 1000, 16);
+}
+
+static int max31730_write_config(struct max31730_data *data, u8 set_mask,
+                                u8 clr_mask)
+{
+       u8 value;
+
+       clr_mask |= MAX31730_EXTRANGE;
+       value = data->current_conf & ~clr_mask;
+       value |= set_mask;
+
+       if (data->current_conf != value) {
+               s32 err;
+
+               err = i2c_smbus_write_byte_data(data->client, MAX31730_REG_CONF,
+                                               value);
+               if (err)
+                       return err;
+               data->current_conf = value;
+       }
+       return 0;
+}
+
+static int max31730_set_enable(struct i2c_client *client, int reg,
+                              u8 *confdata, int channel, bool enable)
+{
+       u8 regval = *confdata;
+       int err;
+
+       if (enable)
+               regval |= BIT(channel);
+       else
+               regval &= ~BIT(channel);
+
+       if (regval != *confdata) {
+               err = i2c_smbus_write_byte_data(client, reg, regval);
+               if (err)
+                       return err;
+               *confdata = regval;
+       }
+       return 0;
+}
+
+static int max31730_set_offset_enable(struct max31730_data *data, int channel,
+                                     bool enable)
+{
+       return max31730_set_enable(data->client, MAX31730_REG_OFFSET_ENABLE,
+                                  &data->offset_enable, channel, enable);
+}
+
+static int max31730_set_channel_enable(struct max31730_data *data, int channel,
+                                      bool enable)
+{
+       return max31730_set_enable(data->client, MAX31730_REG_CHANNEL_ENABLE,
+                                  &data->channel_enable, channel, enable);
+}
+
+static int max31730_read(struct device *dev, enum hwmon_sensor_types type,
+                        u32 attr, int channel, long *val)
+{
+       struct max31730_data *data = dev_get_drvdata(dev);
+       int regval, reg, offset;
+
+       if (type != hwmon_temp)
+               return -EINVAL;
+
+       switch (attr) {
+       case hwmon_temp_input:
+               if (!(data->channel_enable & BIT(channel)))
+                       return -ENODATA;
+               reg = MAX31730_REG_TEMP + (channel * 2);
+               break;
+       case hwmon_temp_max:
+               reg = MAX31730_REG_TEMP_MAX + (channel * 2);
+               break;
+       case hwmon_temp_min:
+               reg = MAX31730_REG_TEMP_MIN;
+               break;
+       case hwmon_temp_enable:
+               *val = !!(data->channel_enable & BIT(channel));
+               return 0;
+       case hwmon_temp_offset:
+               if (!channel)
+                       return -EINVAL;
+               if (!(data->offset_enable & BIT(channel))) {
+                       *val = 0;
+                       return 0;
+               }
+               offset = i2c_smbus_read_byte_data(data->client,
+                                                 MAX31730_REG_TEMP_OFFSET);
+               if (offset < 0)
+                       return offset;
+               *val = (offset - MAX31730_TEMP_OFFSET_BASELINE) * 125;
+               return 0;
+       case hwmon_temp_fault:
+               regval = i2c_smbus_read_byte_data(data->client,
+                                                 MAX31730_REG_TEMP_FAULT);
+               if (regval < 0)
+                       return regval;
+               *val = !!(regval & BIT(channel));
+               return 0;
+       case hwmon_temp_min_alarm:
+               regval = i2c_smbus_read_byte_data(data->client,
+                                                 MAX31730_REG_STATUS_LOW);
+               if (regval < 0)
+                       return regval;
+               *val = !!(regval & BIT(channel));
+               return 0;
+       case hwmon_temp_max_alarm:
+               regval = i2c_smbus_read_byte_data(data->client,
+                                                 MAX31730_REG_STATUS_HIGH);
+               if (regval < 0)
+                       return regval;
+               *val = !!(regval & BIT(channel));
+               return 0;
+       default:
+               return -EINVAL;
+       }
+       regval = i2c_smbus_read_word_swapped(data->client, reg);
+       if (regval < 0)
+               return regval;
+
+       *val = max31730_reg_to_mc(regval);
+
+       return 0;
+}
+
+static int max31730_write(struct device *dev, enum hwmon_sensor_types type,
+                         u32 attr, int channel, long val)
+{
+       struct max31730_data *data = dev_get_drvdata(dev);
+       int reg, err;
+
+       if (type != hwmon_temp)
+               return -EINVAL;
+
+       switch (attr) {
+       case hwmon_temp_max:
+               reg = MAX31730_REG_TEMP_MAX + channel * 2;
+               break;
+       case hwmon_temp_min:
+               reg = MAX31730_REG_TEMP_MIN;
+               break;
+       case hwmon_temp_enable:
+               if (val != 0 && val != 1)
+                       return -EINVAL;
+               return max31730_set_channel_enable(data, channel, val);
+       case hwmon_temp_offset:
+               val = clamp_val(val, -14875, 17000) + 14875;
+               val = DIV_ROUND_CLOSEST(val, 125);
+               err = max31730_set_offset_enable(data, channel,
+                                       val != MAX31730_TEMP_OFFSET_BASELINE);
+               if (err)
+                       return err;
+               return i2c_smbus_write_byte_data(data->client,
+                                                MAX31730_REG_TEMP_OFFSET, val);
+       default:
+               return -EINVAL;
+       }
+
+       val = clamp_val(val, MAX31730_TEMP_MIN, MAX31730_TEMP_MAX);
+       val = DIV_ROUND_CLOSEST(val << 4, 1000) << 4;
+
+       return i2c_smbus_write_word_swapped(data->client, reg, (u16)val);
+}
+
+static umode_t max31730_is_visible(const void *data,
+                                  enum hwmon_sensor_types type,
+                                  u32 attr, int channel)
+{
+       switch (type) {
+       case hwmon_temp:
+               switch (attr) {
+               case hwmon_temp_input:
+               case hwmon_temp_min_alarm:
+               case hwmon_temp_max_alarm:
+               case hwmon_temp_fault:
+                       return 0444;
+               case hwmon_temp_min:
+                       return channel ? 0444 : 0644;
+               case hwmon_temp_offset:
+               case hwmon_temp_enable:
+               case hwmon_temp_max:
+                       return 0644;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static const struct hwmon_channel_info *max31730_info[] = {
+       HWMON_CHANNEL_INFO(chip,
+                          HWMON_C_REGISTER_TZ),
+       HWMON_CHANNEL_INFO(temp,
+                          HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+                          HWMON_T_ENABLE |
+                          HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+                          HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+                          HWMON_T_OFFSET | HWMON_T_ENABLE |
+                          HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+                          HWMON_T_FAULT,
+                          HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+                          HWMON_T_OFFSET | HWMON_T_ENABLE |
+                          HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+                          HWMON_T_FAULT,
+                          HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+                          HWMON_T_OFFSET | HWMON_T_ENABLE |
+                          HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+                          HWMON_T_FAULT
+                          ),
+       NULL
+};
+
+static const struct hwmon_ops max31730_hwmon_ops = {
+       .is_visible = max31730_is_visible,
+       .read = max31730_read,
+       .write = max31730_write,
+};
+
+static const struct hwmon_chip_info max31730_chip_info = {
+       .ops = &max31730_hwmon_ops,
+       .info = max31730_info,
+};
+
+static void max31730_remove(void *data)
+{
+       struct max31730_data *max31730 = data;
+       struct i2c_client *client = max31730->client;
+
+       i2c_smbus_write_byte_data(client, MAX31730_REG_CONF,
+                                 max31730->orig_conf);
+}
+
+static int
+max31730_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct device *hwmon_dev;
+       struct max31730_data *data;
+       int status, err;
+
+       if (!i2c_check_functionality(client->adapter,
+                       I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+               return -EIO;
+
+       data = devm_kzalloc(dev, sizeof(struct max31730_data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->client = client;
+
+       /* Cache original configuration and enable status */
+       status = i2c_smbus_read_byte_data(client, MAX31730_REG_CHANNEL_ENABLE);
+       if (status < 0)
+               return status;
+       data->channel_enable = status;
+
+       status = i2c_smbus_read_byte_data(client, MAX31730_REG_OFFSET_ENABLE);
+       if (status < 0)
+               return status;
+       data->offset_enable = status;
+
+       status = i2c_smbus_read_byte_data(client, MAX31730_REG_CONF);
+       if (status < 0)
+               return status;
+       data->orig_conf = status;
+       data->current_conf = status;
+
+       err = max31730_write_config(data,
+                                   data->channel_enable ? 0 : MAX31730_STOP,
+                                   data->channel_enable ? MAX31730_STOP : 0);
+       if (err)
+               return err;
+
+       dev_set_drvdata(dev, data);
+
+       err = devm_add_action_or_reset(dev, max31730_remove, data);
+       if (err)
+               return err;
+
+       hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+                                                        data,
+                                                        &max31730_chip_info,
+                                                        NULL);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max31730_ids[] = {
+       { "max31730", 0, },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, max31730_ids);
+
+static const struct of_device_id __maybe_unused max31730_of_match[] = {
+       {
+               .compatible = "maxim,max31730",
+       },
+       { },
+};
+MODULE_DEVICE_TABLE(of, max31730_of_match);
+
+static bool max31730_check_reg_temp(struct i2c_client *client,
+                                   int reg)
+{
+       int regval;
+
+       regval = i2c_smbus_read_byte_data(client, reg + 1);
+       return regval < 0 || (regval & 0x0f);
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max31730_detect(struct i2c_client *client,
+                          struct i2c_board_info *info)
+{
+       struct i2c_adapter *adapter = client->adapter;
+       int regval;
+       int i;
+
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+                                    I2C_FUNC_SMBUS_WORD_DATA))
+               return -ENODEV;
+
+       regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_ID);
+       if (regval != MAX31730_MFG_ID)
+               return -ENODEV;
+       regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_REV);
+       if (regval != MAX31730_MFG_REV)
+               return -ENODEV;
+
+       /* lower 4 bits of temperature and limit registers must be 0 */
+       if (max31730_check_reg_temp(client, MAX31730_REG_TEMP_MIN))
+               return -ENODEV;
+
+       for (i = 0; i < 4; i++) {
+               if (max31730_check_reg_temp(client, MAX31730_REG_TEMP + i * 2))
+                       return -ENODEV;
+               if (max31730_check_reg_temp(client,
+                                           MAX31730_REG_TEMP_MAX + i * 2))
+                       return -ENODEV;
+       }
+
+       strlcpy(info->type, "max31730", I2C_NAME_SIZE);
+
+       return 0;
+}
+
+static int __maybe_unused max31730_suspend(struct device *dev)
+{
+       struct max31730_data *data = dev_get_drvdata(dev);
+
+       return max31730_write_config(data, MAX31730_STOP, 0);
+}
+
+static int __maybe_unused max31730_resume(struct device *dev)
+{
+       struct max31730_data *data = dev_get_drvdata(dev);
+
+       return max31730_write_config(data, 0, MAX31730_STOP);
+}
+
+static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume);
+
+static struct i2c_driver max31730_driver = {
+       .class          = I2C_CLASS_HWMON,
+       .driver = {
+               .name   = "max31730",
+               .of_match_table = of_match_ptr(max31730_of_match),
+               .pm     = &max31730_pm_ops,
+       },
+       .probe          = max31730_probe,
+       .id_table       = max31730_ids,
+       .detect         = max31730_detect,
+       .address_list   = normal_i2c,
+};
+
+module_i2c_driver(max31730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("MAX31730 driver");
+MODULE_LICENSE("GPL");
index f3dd2a1..2e97e56 100644 (file)
@@ -23,8 +23,8 @@
 static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
 
 static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
-       { 0x40, 0x00, 0x42, 0x44, 0x46 },
-       { 0x3f, 0x00, 0x41, 0x43, 0x45 },
+       { 0x46, 0x00, 0x40, 0x42, 0x44 },
+       { 0x45, 0x00, 0x3f, 0x41, 0x43 },
 };
 
 static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
@@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
 struct nct7802_data {
        struct regmap *regmap;
        struct mutex access_lock; /* for multi-byte read and write operations */
+       u8 in_status;
+       struct mutex in_alarm_lock;
 };
 
 static ssize_t temp_type_show(struct device *dev,
@@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr,
        return err ? : count;
 }
 
+static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+       struct nct7802_data *data = dev_get_drvdata(dev);
+       int volt, min, max, ret;
+       unsigned int val;
+
+       mutex_lock(&data->in_alarm_lock);
+
+       /*
+        * The SMI Voltage status register is the only register giving a status
+        * for voltages. A bit is set for each input crossing a threshold, in
+        * both directions, but whether the input is inside or outside its
+        * limits is not reported. Also, this register is cleared on read.
+        * Note: this is not explicitly spelled out in the datasheet, but
+        * was determined by experiment.
+        * To deal with this we use a status cache with one validity bit and
+        * one status bit for each input. Validity is cleared at startup and
+        * each time the register reports a change, and the status is processed
+        * by software based on current input value and limits.
+        */
+       ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
+       if (ret < 0)
+               goto abort;
+
+       /* invalidate cached status for all inputs crossing a threshold */
+       data->in_status &= ~((val & 0x0f) << 4);
+
+       /* if cached status for requested input is invalid, update it */
+       if (!(data->in_status & (0x10 << sattr->index))) {
+               ret = nct7802_read_voltage(data, sattr->nr, 0);
+               if (ret < 0)
+                       goto abort;
+               volt = ret;
+
+               ret = nct7802_read_voltage(data, sattr->nr, 1);
+               if (ret < 0)
+                       goto abort;
+               min = ret;
+
+               ret = nct7802_read_voltage(data, sattr->nr, 2);
+               if (ret < 0)
+                       goto abort;
+               max = ret;
+
+               if (volt < min || volt > max)
+                       data->in_status |= (1 << sattr->index);
+               else
+                       data->in_status &= ~(1 << sattr->index);
+
+               data->in_status |= 0x10 << sattr->index;
+       }
+
+       ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
+abort:
+       mutex_unlock(&data->in_alarm_lock);
+       return ret;
+}
+
 static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
@@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = {
 static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
 static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);
 
 static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
@@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
 static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);
 
 static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);
 
 static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
 static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);
 
 static struct attribute *nct7802_in_attrs[] = {
@@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client,
                return PTR_ERR(data->regmap);
 
        mutex_init(&data->access_lock);
+       mutex_init(&data->in_alarm_lock);
 
        ret = nct7802_init_chip(data);
        if (ret < 0)
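The cache scheme documented in the in_alarm_show() comment above can be summarized in isolation: the low nibble of in_status caches per-input alarm state, the high nibble marks which cached entries are still valid, and a set bit in the clear-on-read SMI status register invalidates the matching entry. A self-contained sketch (volt/min/max are passed in here; the driver reads them from the chip):

	#include <stdbool.h>
	#include <stdint.h>

	static uint8_t in_status;	/* bits 0-3: alarm, bits 4-7: valid */

	static bool in_alarm(int index, uint8_t smi_status,
			     int volt, int min, int max)
	{
		/* any threshold crossing invalidates that input's cache */
		in_status &= ~((smi_status & 0x0f) << 4);

		if (!(in_status & (0x10 << index))) {	/* recompute on miss */
			if (volt < min || volt > max)
				in_status |= 1 << index;
			else
				in_status &= ~(1 << index);
			in_status |= 0x10 << index;	/* mark valid again */
		}
		return in_status & (1 << index);
	}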
index 5985997..a9ea062 100644 (file)
@@ -20,8 +20,8 @@ config SENSORS_PMBUS
        help
          If you say yes here you get hardware monitoring support for generic
          PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
-         MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20,
-         TPS544B25, TPS544C20, TPS544C25, and UDT020.
+         MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400,
+         TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020.
 
          This driver can also be built as a module. If so, the module will
          be called pmbus.
@@ -145,6 +145,15 @@ config SENSORS_MAX16064
          This driver can also be built as a module. If so, the module will
          be called max16064.
 
+config SENSORS_MAX20730
+       tristate "Maxim MAX20730, MAX20734, MAX20743"
+       help
+         If you say yes here you get hardware monitoring support for Maxim
+         MAX20730, MAX20734, and MAX20743.
+
+         This driver can also be built as a module. If so, the module will
+         be called max20730.
+
 config SENSORS_MAX20751
        tristate "Maxim MAX20751"
        help
@@ -200,20 +209,20 @@ config SENSORS_TPS40422
          be called tps40422.
 
 config SENSORS_TPS53679
-       tristate "TI TPS53679"
+       tristate "TI TPS53679, TPS53688"
        help
          If you say yes here you get hardware monitoring support for TI
-         TPS53679.
+         TPS53679, TPS53688.
 
          This driver can also be built as a module. If so, the module will
          be called tps53679.
 
 config SENSORS_UCD9000
-       tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910"
+       tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910"
        help
          If you say yes here you get hardware monitoring support for TI
-         UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System
-         Health Controllers.
+         UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910, Sequencer
+         and System Health Controllers.
 
          This driver can also be built as a module. If so, the module will
          be called ucd9000.
@@ -228,6 +237,15 @@ config SENSORS_UCD9200
          This driver can also be built as a module. If so, the module will
          be called ucd9200.
 
+config SENSORS_XDPE122
+       tristate "Infineon XDPE122 family"
+       help
+         If you say yes here you get hardware monitoring support for Infineon
+         XDPE12254 and XDPE12284 devices.
+
+         This driver can also be built as a module. If so, the module will
+         be called xdpe12284.
+
 config SENSORS_ZL6100
        tristate "Intersil ZL6100 and compatibles"
        help
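For completeness, a config fragment sketch enabling the PMBus drivers added above as modules (option names taken from the hunks; the fragment itself is illustrative, not from the commit):

	CONFIG_PMBUS=m
	CONFIG_SENSORS_PMBUS=m
	CONFIG_SENSORS_MAX20730=m
	CONFIG_SENSORS_XDPE122=m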
index 3f8c101..5feb458 100644 (file)
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
 obj-$(CONFIG_SENSORS_LTC2978)  += ltc2978.o
 obj-$(CONFIG_SENSORS_LTC3815)  += ltc3815.o
 obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX20730) += max20730.o
 obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
 obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
 obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
@@ -26,4 +27,5 @@ obj-$(CONFIG_SENSORS_TPS40422)        += tps40422.o
 obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
 obj-$(CONFIG_SENSORS_UCD9000)  += ucd9000.o
 obj-$(CONFIG_SENSORS_UCD9200)  += ucd9200.o
+obj-$(CONFIG_SENSORS_XDPE122)  += xdpe12284.o
 obj-$(CONFIG_SENSORS_ZL6100)   += zl6100.o
index d359b76..3795fe5 100644 (file)
 
 #define CFFPS_FRU_CMD                          0x9A
 #define CFFPS_PN_CMD                           0x9B
+#define CFFPS_HEADER_CMD                       0x9C
 #define CFFPS_SN_CMD                           0x9E
+#define CFFPS_MAX_POWER_OUT_CMD                        0xA7
 #define CFFPS_CCIN_CMD                         0xBD
 #define CFFPS_FW_CMD                           0xFA
 #define CFFPS1_FW_NUM_BYTES                    4
 #define CFFPS2_FW_NUM_WORDS                    3
 #define CFFPS_SYS_CONFIG_CMD                   0xDA
+#define CFFPS_12VCS_VOUT_CMD                   0xDE
 
 #define CFFPS_INPUT_HISTORY_CMD                        0xD6
 #define CFFPS_INPUT_HISTORY_SIZE               100
 #define CFFPS_MFR_VAUX_FAULT                   BIT(6)
 #define CFFPS_MFR_CURRENT_SHARE_WARNING                BIT(7)
 
-/*
- * LED off state actually relinquishes LED control to PSU firmware, so it can
- * turn on the LED for faults.
- */
-#define CFFPS_LED_OFF                          0
 #define CFFPS_LED_BLINK                                BIT(0)
 #define CFFPS_LED_ON                           BIT(1)
+#define CFFPS_LED_OFF                          BIT(2)
 #define CFFPS_BLINK_RATE_MS                    250
 
 enum {
        CFFPS_DEBUGFS_INPUT_HISTORY = 0,
        CFFPS_DEBUGFS_FRU,
        CFFPS_DEBUGFS_PN,
+       CFFPS_DEBUGFS_HEADER,
        CFFPS_DEBUGFS_SN,
+       CFFPS_DEBUGFS_MAX_POWER_OUT,
        CFFPS_DEBUGFS_CCIN,
        CFFPS_DEBUGFS_FW,
+       CFFPS_DEBUGFS_ON_OFF_CONFIG,
        CFFPS_DEBUGFS_NUM_ENTRIES
 };
 
@@ -136,15 +138,15 @@ static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
                                       psu->input_history.byte_count);
 }
 
-static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
-                                   size_t count, loff_t *ppos)
+static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
+                                     size_t count, loff_t *ppos)
 {
        u8 cmd;
        int i, rc;
        int *idxp = file->private_data;
        int idx = *idxp;
        struct ibm_cffps *psu = to_psu(idxp, idx);
-       char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+       char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
 
        pmbus_set_page(psu->client, 0);
 
@@ -157,9 +159,20 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
        case CFFPS_DEBUGFS_PN:
                cmd = CFFPS_PN_CMD;
                break;
+       case CFFPS_DEBUGFS_HEADER:
+               cmd = CFFPS_HEADER_CMD;
+               break;
        case CFFPS_DEBUGFS_SN:
                cmd = CFFPS_SN_CMD;
                break;
+       case CFFPS_DEBUGFS_MAX_POWER_OUT:
+               rc = i2c_smbus_read_word_swapped(psu->client,
+                                                CFFPS_MAX_POWER_OUT_CMD);
+               if (rc < 0)
+                       return rc;
+
+               rc = snprintf(data, I2C_SMBUS_BLOCK_MAX, "%d", rc);
+               goto done;
        case CFFPS_DEBUGFS_CCIN:
                rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
                if (rc < 0)
@@ -199,6 +212,14 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
                        return -EOPNOTSUPP;
                }
                goto done;
+       case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+               rc = i2c_smbus_read_byte_data(psu->client,
+                                             PMBUS_ON_OFF_CONFIG);
+               if (rc < 0)
+                       return rc;
+
+               rc = snprintf(data, 3, "%02x", rc);
+               goto done;
        default:
                return -EINVAL;
        }
@@ -214,9 +235,42 @@ done:
        return simple_read_from_buffer(buf, count, ppos, data, rc);
 }
 
+static ssize_t ibm_cffps_debugfs_write(struct file *file,
+                                      const char __user *buf, size_t count,
+                                      loff_t *ppos)
+{
+       u8 data;
+       ssize_t rc;
+       int *idxp = file->private_data;
+       int idx = *idxp;
+       struct ibm_cffps *psu = to_psu(idxp, idx);
+
+       switch (idx) {
+       case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+               pmbus_set_page(psu->client, 0);
+
+               rc = simple_write_to_buffer(&data, 1, ppos, buf, count);
+               if (rc <= 0)
+                       return rc;
+
+               rc = i2c_smbus_write_byte_data(psu->client,
+                                              PMBUS_ON_OFF_CONFIG, data);
+               if (rc)
+                       return rc;
+
+               rc = 1;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return rc;
+}
+
 static const struct file_operations ibm_cffps_fops = {
        .llseek = noop_llseek,
-       .read = ibm_cffps_debugfs_op,
+       .read = ibm_cffps_debugfs_read,
+       .write = ibm_cffps_debugfs_write,
        .open = simple_open,
 };
 
@@ -293,6 +347,9 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
                if (mfr & CFFPS_MFR_PS_KILL)
                        rc |= PB_STATUS_OFF;
                break;
+       case PMBUS_VIRT_READ_VMON:
+               rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD);
+               break;
        default:
                rc = -ENODATA;
                break;
@@ -375,6 +432,9 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
        rc = devm_led_classdev_register(dev, &psu->led);
        if (rc)
                dev_warn(dev, "failed to register led class: %d\n", rc);
+       else
+               i2c_smbus_write_byte_data(client, CFFPS_SYS_CONFIG_CMD,
+                                         CFFPS_LED_OFF);
 }
 
 static struct pmbus_driver_info ibm_cffps_info[] = {
@@ -396,7 +456,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = {
                        PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
                        PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
                        PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
-                       PMBUS_HAVE_STATUS_FAN12,
+                       PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_VMON,
                .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
                        PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
                        PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT,
@@ -486,15 +546,24 @@ static int ibm_cffps_probe(struct i2c_client *client,
        debugfs_create_file("part_number", 0444, ibm_cffps_dir,
                            &psu->debugfs_entries[CFFPS_DEBUGFS_PN],
                            &ibm_cffps_fops);
+       debugfs_create_file("header", 0444, ibm_cffps_dir,
+                           &psu->debugfs_entries[CFFPS_DEBUGFS_HEADER],
+                           &ibm_cffps_fops);
        debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
                            &psu->debugfs_entries[CFFPS_DEBUGFS_SN],
                            &ibm_cffps_fops);
+       debugfs_create_file("max_power_out", 0444, ibm_cffps_dir,
+                           &psu->debugfs_entries[CFFPS_DEBUGFS_MAX_POWER_OUT],
+                           &ibm_cffps_fops);
        debugfs_create_file("ccin", 0444, ibm_cffps_dir,
                            &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
                            &ibm_cffps_fops);
        debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
                            &psu->debugfs_entries[CFFPS_DEBUGFS_FW],
                            &ibm_cffps_fops);
+       debugfs_create_file("on_off_config", 0644, ibm_cffps_dir,
+                           &psu->debugfs_entries[CFFPS_DEBUGFS_ON_OFF_CONFIG],
+                           &ibm_cffps_fops);
 
        return 0;
 }
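
The new on_off_config attribute is deliberately asymmetric: reads return the
PMBUS_ON_OFF_CONFIG byte rendered as two hex digits ("%02x"), while writes
consume a single raw byte through simple_write_to_buffer(). A minimal
userspace sketch of the round-trip follows; the debugfs mount point and the
"ibm-cffps1" directory name are assumptions, so check the actual path on the
target system:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/ibm-cffps1/on_off_config";
        char hex[3] = "";
        unsigned char raw = 0x1a;       /* example ON_OFF_CONFIG value */
        int fd;

        fd = open(path, O_RDONLY);      /* read: two hex digits */
        if (fd >= 0) {
                if (read(fd, hex, 2) == 2)
                        printf("ON_OFF_CONFIG = 0x%s\n", hex);
                close(fd);
        }

        fd = open(path, O_WRONLY);      /* write: one raw byte, not hex text */
        if (fd >= 0) {
                if (write(fd, &raw, 1) != 1)
                        perror("write");
                close(fd);
        }
        return 0;
}

Note the separate open() calls: the file uses noop_llseek, so the position
cannot be rewound between a read and a write on the same descriptor.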
diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c
new file mode 100644
index 0000000..294e221
--- /dev/null
+++ b/drivers/hwmon/pmbus/max20730.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX20730, MAX20734, and MAX20743 Integrated, Step-Down
+ * Switching Regulators
+ *
+ * Copyright 2019 Google LLC.
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/pmbus.h>
+#include <linux/util_macros.h>
+#include "pmbus.h"
+
+enum chips {
+       max20730,
+       max20734,
+       max20743
+};
+
+struct max20730_data {
+       enum chips id;
+       struct pmbus_driver_info info;
+       struct mutex lock;      /* Used to protect against parallel writes */
+       u16 mfr_devset1;
+};
+
+#define to_max20730_data(x)  container_of(x, struct max20730_data, info)
+
+#define MAX20730_MFR_DEVSET1   0xd2
+
+/*
+ * Convert discrete value to direct data format. Strictly speaking, all passed
+ * values are constants, so we could do that calculation manually. On the
+ * downside, that would make the driver more difficult to maintain, so let's
+ * use this approach.
+ */
+static u16 val_to_direct(int v, enum pmbus_sensor_classes class,
+                        const struct pmbus_driver_info *info)
+{
+       int R = info->R[class] - 3;     /* take milli-units into account */
+       int b = info->b[class] * 1000;
+       long d;
+
+       d = v * info->m[class] + b;
+       /*
+        * R < 0 is true for all callers, so we don't need to bother
+        * about the R > 0 case.
+        */
+       while (R < 0) {
+               d = DIV_ROUND_CLOSEST(d, 10);
+               R++;
+       }
+       return (u16)d;
+}
+
+static long direct_to_val(u16 w, enum pmbus_sensor_classes class,
+                         const struct pmbus_driver_info *info)
+{
+       int R = info->R[class] - 3;
+       int b = info->b[class] * 1000;
+       int m = info->m[class];
+       long d = (s16)w;
+
+       if (m == 0)
+               return 0;
+
+       while (R < 0) {
+               d *= 10;
+               R++;
+       }
+       d = (d - b) / m;
+       return d;
+}
+
+static u32 max_current[][5] = {
+       [max20730] = { 13000, 16600, 20100, 23600 },
+       [max20734] = { 21000, 27000, 32000, 38000 },
+       [max20743] = { 18900, 24100, 29200, 34100 },
+};
+
+static int max20730_read_word_data(struct i2c_client *client, int page, int reg)
+{
+       const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+       const struct max20730_data *data = to_max20730_data(info);
+       int ret = 0;
+       u32 max_c;
+
+       switch (reg) {
+       case PMBUS_OT_FAULT_LIMIT:
+               switch ((data->mfr_devset1 >> 11) & 0x3) {
+               case 0x0:
+                       ret = val_to_direct(150000, PSC_TEMPERATURE, info);
+                       break;
+               case 0x1:
+                       ret = val_to_direct(130000, PSC_TEMPERATURE, info);
+                       break;
+               default:
+                       ret = -ENODATA;
+                       break;
+               }
+               break;
+       case PMBUS_IOUT_OC_FAULT_LIMIT:
+               max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3];
+               ret = val_to_direct(max_c, PSC_CURRENT_OUT, info);
+               break;
+       default:
+               ret = -ENODATA;
+               break;
+       }
+       return ret;
+}
+
+static int max20730_write_word_data(struct i2c_client *client, int page,
+                                   int reg, u16 word)
+{
+       struct pmbus_driver_info *info;
+       struct max20730_data *data;
+       u16 devset1;
+       int ret = 0;
+       int idx;
+
+       info = (struct pmbus_driver_info *)pmbus_get_driver_info(client);
+       data = to_max20730_data(info);
+
+       mutex_lock(&data->lock);
+       devset1 = data->mfr_devset1;
+
+       switch (reg) {
+       case PMBUS_OT_FAULT_LIMIT:
+               devset1 &= ~(BIT(11) | BIT(12));
+               if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000)
+                       devset1 |= BIT(11);
+               break;
+       case PMBUS_IOUT_OC_FAULT_LIMIT:
+               devset1 &= ~(BIT(5) | BIT(6));
+
+               idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info),
+                                  max_current[data->id], 4);
+               devset1 |= (idx << 5);
+               break;
+       default:
+               ret = -ENODATA;
+               break;
+       }
+
+       if (!ret && devset1 != data->mfr_devset1) {
+               ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1,
+                                               devset1);
+               if (!ret) {
+                       data->mfr_devset1 = devset1;
+                       pmbus_clear_cache(client);
+               }
+       }
+       mutex_unlock(&data->lock);
+       return ret;
+}
+
+static const struct pmbus_driver_info max20730_info[] = {
+       [max20730] = {
+               .pages = 1,
+               .read_word_data = max20730_read_word_data,
+               .write_word_data = max20730_write_word_data,
+
+               /* Source: Maxim AN6042 */
+               .format[PSC_TEMPERATURE] = direct,
+               .m[PSC_TEMPERATURE] = 21,
+               .b[PSC_TEMPERATURE] = 5887,
+               .R[PSC_TEMPERATURE] = -1,
+
+               .format[PSC_VOLTAGE_IN] = direct,
+               .m[PSC_VOLTAGE_IN] = 3609,
+               .b[PSC_VOLTAGE_IN] = 0,
+               .R[PSC_VOLTAGE_IN] = -2,
+
+               /*
+                * Values in the datasheet are adjusted for temperature and
+                * for the relationship between Vin and Vout.
+                * Unfortunately, the data sheet suggests that Vout measurement
+                * may be scaled with a resistor array. This is indeed the case
+                * at least on the evaulation boards. As a result, any in-driver
+                * adjustments would either be wrong or require elaborate means
+                * to configure the scaling. Instead of doing that, just report
+                * raw values and let userspace handle adjustments.
+                */
+               .format[PSC_CURRENT_OUT] = direct,
+               .m[PSC_CURRENT_OUT] = 153,
+               .b[PSC_CURRENT_OUT] = 4976,
+               .R[PSC_CURRENT_OUT] = -1,
+
+               .format[PSC_VOLTAGE_OUT] = linear,
+
+               .func[0] = PMBUS_HAVE_VIN |
+                       PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+                       PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+                       PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+       },
+       [max20734] = {
+               .pages = 1,
+               .read_word_data = max20730_read_word_data,
+               .write_word_data = max20730_write_word_data,
+
+               /* Source: Maxim AN6209 */
+               .format[PSC_TEMPERATURE] = direct,
+               .m[PSC_TEMPERATURE] = 21,
+               .b[PSC_TEMPERATURE] = 5887,
+               .R[PSC_TEMPERATURE] = -1,
+
+               .format[PSC_VOLTAGE_IN] = direct,
+               .m[PSC_VOLTAGE_IN] = 3592,
+               .b[PSC_VOLTAGE_IN] = 0,
+               .R[PSC_VOLTAGE_IN] = -2,
+
+               .format[PSC_CURRENT_OUT] = direct,
+               .m[PSC_CURRENT_OUT] = 111,
+               .b[PSC_CURRENT_OUT] = 3461,
+               .R[PSC_CURRENT_OUT] = -1,
+
+               .format[PSC_VOLTAGE_OUT] = linear,
+
+               .func[0] = PMBUS_HAVE_VIN |
+                       PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+                       PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+                       PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+       },
+       [max20743] = {
+               .pages = 1,
+               .read_word_data = max20730_read_word_data,
+               .write_word_data = max20730_write_word_data,
+
+               /* Source: Maxim AN6042 */
+               .format[PSC_TEMPERATURE] = direct,
+               .m[PSC_TEMPERATURE] = 21,
+               .b[PSC_TEMPERATURE] = 5887,
+               .R[PSC_TEMPERATURE] = -1,
+
+               .format[PSC_VOLTAGE_IN] = direct,
+               .m[PSC_VOLTAGE_IN] = 3597,
+               .b[PSC_VOLTAGE_IN] = 0,
+               .R[PSC_VOLTAGE_IN] = -2,
+
+               .format[PSC_CURRENT_OUT] = direct,
+               .m[PSC_CURRENT_OUT] = 95,
+               .b[PSC_CURRENT_OUT] = 5014,
+               .R[PSC_CURRENT_OUT] = -1,
+
+               .format[PSC_VOLTAGE_OUT] = linear,
+
+               .func[0] = PMBUS_HAVE_VIN |
+                       PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+                       PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+                       PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+       },
+};
+
+static int max20730_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+       struct max20730_data *data;
+       enum chips chip_id;
+       int ret;
+
+       if (!i2c_check_functionality(client->adapter,
+                                    I2C_FUNC_SMBUS_READ_BYTE_DATA |
+                                    I2C_FUNC_SMBUS_READ_WORD_DATA |
+                                    I2C_FUNC_SMBUS_BLOCK_DATA))
+               return -ENODEV;
+
+       ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+       if (ret < 0) {
+               dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+               return ret;
+       }
+       if (ret != 5 || strncmp(buf, "MAXIM", 5)) {
+               buf[ret] = '\0';
+               dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
+               return -ENODEV;
+       }
+
+       /*
+        * The chips support reading PMBUS_MFR_MODEL. On both MAX20730
+        * and MAX20734, reading it returns M20743. Presumably that is
+        * the reason why the command is not documented. Unfortunately,
+        * that means that there is no reliable means to detect the chip.
+        * However, we can at least detect the chip series. Compare
+        * the returned value against 'M20743' and bail out if there is
+        * a mismatch. If that doesn't work for all chips, we may have
+        * to remove this check.
+        */
+       ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+       if (ret < 0) {
+               dev_err(dev, "Failed to read Manufacturer Model\n");
+               return ret;
+       }
+       if (ret != 6 || strncmp(buf, "M20743", 6)) {
+               buf[ret] = '\0';
+               dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf);
+               return -ENODEV;
+       }
+
+       ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf);
+       if (ret < 0) {
+               dev_err(dev, "Failed to read Manufacturer Revision\n");
+               return ret;
+       }
+       if (ret != 1 || buf[0] != 'F') {
+               buf[ret] = '\0';
+               dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf);
+               return -ENODEV;
+       }
+
+       if (client->dev.of_node)
+               chip_id = (enum chips)of_device_get_match_data(dev);
+       else
+               chip_id = id->driver_data;
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+       data->id = chip_id;
+       mutex_init(&data->lock);
+       memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info));
+
+       ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1);
+       if (ret < 0)
+               return ret;
+       data->mfr_devset1 = ret;
+
+       return pmbus_do_probe(client, id, &data->info);
+}
+
+static const struct i2c_device_id max20730_id[] = {
+       { "max20730", max20730 },
+       { "max20734", max20734 },
+       { "max20743", max20743 },
+       { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max20730_id);
+
+static const struct of_device_id max20730_of_match[] = {
+       { .compatible = "maxim,max20730", .data = (void *)max20730 },
+       { .compatible = "maxim,max20734", .data = (void *)max20734 },
+       { .compatible = "maxim,max20743", .data = (void *)max20743 },
+       { },
+};
+
+MODULE_DEVICE_TABLE(of, max20730_of_match);
+
+static struct i2c_driver max20730_driver = {
+       .driver = {
+               .name = "max20730",
+               .of_match_table = max20730_of_match,
+       },
+       .probe = max20730_probe,
+       .remove = pmbus_do_remove,
+       .id_table = max20730_id,
+};
+
+module_i2c_driver(max20730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX20730 / MAX20734 / MAX20743");
+MODULE_LICENSE("GPL");
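
To make the direct-format helpers above concrete, here is a standalone sketch
that re-implements their arithmetic with the MAX20730 temperature coefficients
from the table (m = 21, b = 5887, R = -1, with 3 subtracted from R because the
driver works in milli-units). 150000 m°C encodes to 904 and decodes back to
150142 m°C, which shows the quantization introduced by the encoding:

#include <stdio.h>

/* Round-to-nearest; adequate for the non-negative values used here. */
#define DIV_ROUND_CLOSEST(x, d)        (((x) + (d) / 2) / (d))

static unsigned short val_to_direct(long v, int m, int b, int R)
{
        long d = v * m + (long)b * 1000;        /* b scaled to milli-units */

        for (R -= 3; R < 0; R++)        /* R - 3: milli-unit adjustment */
                d = DIV_ROUND_CLOSEST(d, 10);
        return (unsigned short)d;
}

static long direct_to_val(unsigned short w, int m, int b, int R)
{
        long d = (short)w;

        for (R -= 3; R < 0; R++)
                d *= 10;
        return (d - (long)b * 1000) / m;
}

int main(void)
{
        /* PSC_TEMPERATURE on the MAX20730: m = 21, b = 5887, R = -1 */
        unsigned short w = val_to_direct(150000, 21, 5887, -1);

        printf("150000 m°C -> %u\n", w);                        /* 904 */
        printf("decoded back: %ld m°C\n",
               direct_to_val(w, 21, 5887, -1));                 /* 150142 */
        return 0;
}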
diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c
index ee5f0cd..da3c38c 100644
@@ -16,7 +16,7 @@ static struct pmbus_driver_info max20751_info = {
        .pages = 1,
        .format[PSC_VOLTAGE_IN] = linear,
        .format[PSC_VOLTAGE_OUT] = vid,
-       .vrm_version = vr12,
+       .vrm_version[0] = vr12,
        .format[PSC_TEMPERATURE] = linear,
        .format[PSC_CURRENT_OUT] = linear,
        .format[PSC_POWER] = linear,
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index c0bc43d..51e8312 100644
@@ -115,7 +115,7 @@ static int pmbus_identify(struct i2c_client *client,
        }
 
        if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
-               int vout_mode;
+               int vout_mode, i;
 
                vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
                if (vout_mode >= 0 && vout_mode != 0xff) {
@@ -124,7 +124,8 @@ static int pmbus_identify(struct i2c_client *client,
                                break;
                        case 1:
                                info->format[PSC_VOLTAGE_OUT] = vid;
-                               info->vrm_version = vr11;
+                               for (i = 0; i < info->pages; i++)
+                                       info->vrm_version[i] = vr11;
                                break;
                        case 2:
                                info->format[PSC_VOLTAGE_OUT] = direct;
@@ -210,6 +211,7 @@ static const struct i2c_device_id pmbus_id[] = {
        {"dps460", (kernel_ulong_t)&pmbus_info_one_skip},
        {"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip},
        {"dps800", (kernel_ulong_t)&pmbus_info_one_skip},
+       {"max20796", (kernel_ulong_t)&pmbus_info_one},
        {"mdt040", (kernel_ulong_t)&pmbus_info_one},
        {"ncp4200", (kernel_ulong_t)&pmbus_info_one},
        {"ncp4208", (kernel_ulong_t)&pmbus_info_one},
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index d198af3..13b34bd 100644
@@ -22,6 +22,8 @@ enum pmbus_regs {
        PMBUS_CLEAR_FAULTS              = 0x03,
        PMBUS_PHASE                     = 0x04,
 
+       PMBUS_WRITE_PROTECT             = 0x10,
+
        PMBUS_CAPABILITY                = 0x19,
        PMBUS_QUERY                     = 0x1A,
 
@@ -225,6 +227,15 @@ enum pmbus_regs {
  */
 #define PB_OPERATION_CONTROL_ON                BIT(7)
 
+/*
+ * WRITE_PROTECT
+ */
+#define PB_WP_ALL      BIT(7)  /* all but WRITE_PROTECT */
+#define PB_WP_OP       BIT(6)  /* all but WP, OPERATION, PAGE */
+#define PB_WP_VOUT     BIT(5)  /* all but WP, OPERATION, PAGE, VOUT, ON_OFF */
+
+#define PB_WP_ANY      (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT)
+
 /*
  * CAPABILITY
  */
@@ -377,12 +388,12 @@ enum pmbus_sensor_classes {
 #define PMBUS_PAGE_VIRTUAL     BIT(31)
 
 enum pmbus_data_format { linear = 0, direct, vid };
-enum vrm_version { vr11 = 0, vr12, vr13 };
+enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
 
 struct pmbus_driver_info {
        int pages;              /* Total number of pages */
        enum pmbus_data_format format[PSC_NUM_CLASSES];
-       enum vrm_version vrm_version;
+       enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */
        /*
         * Support one set of coefficients for each sensor type
         * Used for chips providing data in direct mode.
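
The switch of vrm_version to a per-page array above is what lets a multi-rail
controller use a different VID convention on each page. A minimal sketch of a
driver-side initializer for a hypothetical two-page device (the chip is
invented; the field usage is the point), compiled against the kernel-internal
pmbus.h:

#include "pmbus.h"

static struct pmbus_driver_info example_info = {
        .pages = 2,
        .format[PSC_VOLTAGE_OUT] = vid,
        /* page 0 follows VR12, page 1 IMVP9 */
        .vrm_version = { vr12, imvp9 },
};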
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 8470097..d9c17fe 100644
@@ -696,7 +696,7 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
        long val = sensor->data;
        long rv = 0;
 
-       switch (data->info->vrm_version) {
+       switch (data->info->vrm_version[sensor->page]) {
        case vr11:
                if (val >= 0x02 && val <= 0xb2)
                        rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
@@ -709,6 +709,14 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
                if (val >= 0x01)
                        rv = 500 + (val - 1) * 10;
                break;
+       case imvp9:
+               if (val >= 0x01)
+                       rv = 200 + (val - 1) * 10;
+               break;
+       case amd625mv:
+               if (val >= 0x0 && val <= 0xd8)
+                       rv = DIV_ROUND_CLOSEST(155000 - val * 625, 100);
+               break;
        }
        return rv;
 }
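
In numbers, the two new branches above work like this: imvp9 starts at 200 mV
for code 0x01 and steps up 10 mV per code, while amd625mv starts at 1550 mV
for code 0x00 and steps down 6.25 mV per code, hence the DIV_ROUND_CLOSEST on
the quarter-millivolt intermediate values. A standalone sketch that re-states
just these two conversions:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)        (((x) + (d) / 2) / (d))

static long imvp9_to_mv(int val)
{
        return val >= 0x01 ? 200 + (val - 1) * 10 : 0;
}

static long amd625mv_to_mv(int val)
{
        if (val >= 0x00 && val <= 0xd8)
                return DIV_ROUND_CLOSEST(155000 - val * 625, 100);
        return 0;
}

int main(void)
{
        printf("imvp9    0x01 -> %ld mV\n", imvp9_to_mv(0x01));     /* 200 */
        printf("imvp9    0x80 -> %ld mV\n", imvp9_to_mv(0x80));     /* 1470 */
        printf("amd625mv 0x00 -> %ld mV\n", amd625mv_to_mv(0x00));  /* 1550 */
        printf("amd625mv 0x01 -> %ld mV\n", amd625mv_to_mv(0x01));  /* 1544 */
        return 0;
}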
@@ -1088,6 +1096,9 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
                snprintf(sensor->name, sizeof(sensor->name), "%s%d",
                         name, seq);
 
+       if (data->flags & PMBUS_WRITE_PROTECTED)
+               readonly = true;
+
        sensor->page = page;
        sensor->reg = reg;
        sensor->class = class;
@@ -2141,6 +2152,15 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
        if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
                client->flags |= I2C_CLIENT_PEC;
 
+       /*
+        * Check if the chip is write-protected. If it is, we cannot clear
+        * faults, and we should not try. Also, in that case, writes into
+        * limit registers need to be disabled.
+        */
+       ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
+       if (ret > 0 && (ret & PB_WP_ANY))
+               data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+
        if (data->info->pages)
                pmbus_clear_faults(client);
        else
diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
index ebe3f02..517584c 100644
 static int pxe1610_identify(struct i2c_client *client,
                             struct pmbus_driver_info *info)
 {
-       if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
-               u8 vout_mode;
-               int ret;
-
-               /* Read the register with VOUT scaling value.*/
-               ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
-               if (ret < 0)
-                       return ret;
-
-               vout_mode = ret & GENMASK(4, 0);
-
-               switch (vout_mode) {
-               case 1:
-                       info->vrm_version = vr12;
-                       break;
-               case 2:
-                       info->vrm_version = vr13;
-                       break;
-               default:
-                       return -ENODEV;
+       int i;
+
+       for (i = 0; i < PXE1610_NUM_PAGES; i++) {
+               if (pmbus_check_byte_register(client, i, PMBUS_VOUT_MODE)) {
+                       u8 vout_mode;
+                       int ret;
+
+                       /* Read the register with the VOUT scaling value. */
+                       ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+                       if (ret < 0)
+                               return ret;
+
+                       vout_mode = ret & GENMASK(4, 0);
+
+                       switch (vout_mode) {
+                       case 1:
+                               info->vrm_version[i] = vr12;
+                               break;
+                       case 2:
+                               info->vrm_version[i] = vr13;
+                               break;
+                       default:
+                               return -ENODEV;
+                       }
                }
        }
 
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 86bb3ac..9c22e90 100644
@@ -24,27 +24,29 @@ static int tps53679_identify(struct i2c_client *client,
                             struct pmbus_driver_info *info)
 {
        u8 vout_params;
-       int ret;
-
-       /* Read the register with VOUT scaling value.*/
-       ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
-       if (ret < 0)
-               return ret;
-
-       vout_params = ret & GENMASK(4, 0);
-
-       switch (vout_params) {
-       case TPS53679_PROT_VR13_10MV:
-       case TPS53679_PROT_VR12_5_10MV:
-               info->vrm_version = vr13;
-               break;
-       case TPS53679_PROT_VR13_5MV:
-       case TPS53679_PROT_VR12_5MV:
-       case TPS53679_PROT_IMVP8_5MV:
-               info->vrm_version = vr12;
-               break;
-       default:
-               return -EINVAL;
+       int i, ret;
+
+       for (i = 0; i < TPS53679_PAGE_NUM; i++) {
+               /* Read the register with the VOUT scaling value. */
+               ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+               if (ret < 0)
+                       return ret;
+
+               vout_params = ret & GENMASK(4, 0);
+
+               switch (vout_params) {
+               case TPS53679_PROT_VR13_10MV:
+               case TPS53679_PROT_VR12_5_10MV:
+                       info->vrm_version[i] = vr13;
+                       break;
+               case TPS53679_PROT_VR13_5MV:
+               case TPS53679_PROT_VR12_5MV:
+               case TPS53679_PROT_IMVP8_5MV:
+                       info->vrm_version[i] = vr12;
+                       break;
+               default:
+                       return -EINVAL;
+               }
        }
 
        return 0;
@@ -83,6 +85,7 @@ static int tps53679_probe(struct i2c_client *client,
 
 static const struct i2c_device_id tps53679_id[] = {
        {"tps53679", 0},
+       {"tps53688", 0},
        {}
 };
 
@@ -90,6 +93,7 @@ MODULE_DEVICE_TABLE(i2c, tps53679_id);
 
 static const struct of_device_id __maybe_unused tps53679_of_match[] = {
        {.compatible = "ti,tps53679"},
+       {.compatible = "ti,tps53688"},
        {}
 };
 MODULE_DEVICE_TABLE(of, tps53679_of_match);
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index a9229c6..23ea341 100644
@@ -18,7 +18,8 @@
 #include <linux/gpio/driver.h>
 #include "pmbus.h"
 
-enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
+enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
+            ucd90910 };
 
 #define UCD9000_MONITOR_CONFIG         0xd5
 #define UCD9000_NUM_PAGES              0xd6
@@ -38,7 +39,7 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
 #define UCD9000_GPIO_OUTPUT            1
 
 #define UCD9000_MON_TYPE(x)    (((x) >> 5) & 0x07)
-#define UCD9000_MON_PAGE(x)    ((x) & 0x0f)
+#define UCD9000_MON_PAGE(x)    ((x) & 0x1f)
 
 #define UCD9000_MON_VOLTAGE    1
 #define UCD9000_MON_TEMPERATURE        2
@@ -50,10 +51,12 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
 #define UCD9000_GPIO_NAME_LEN  16
 #define UCD9090_NUM_GPIOS      23
 #define UCD901XX_NUM_GPIOS     26
+#define UCD90320_NUM_GPIOS     84
 #define UCD90910_NUM_GPIOS     26
 
 #define UCD9000_DEBUGFS_NAME_LEN       24
 #define UCD9000_GPI_COUNT              8
+#define UCD90320_GPI_COUNT             32
 
 struct ucd9000_data {
        u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX];
@@ -131,6 +134,7 @@ static const struct i2c_device_id ucd9000_id[] = {
        {"ucd90120", ucd90120},
        {"ucd90124", ucd90124},
        {"ucd90160", ucd90160},
+       {"ucd90320", ucd90320},
        {"ucd9090", ucd9090},
        {"ucd90910", ucd90910},
        {}
@@ -154,6 +158,10 @@ static const struct of_device_id __maybe_unused ucd9000_of_match[] = {
                .compatible = "ti,ucd90160",
                .data = (void *)ucd90160
        },
+       {
+               .compatible = "ti,ucd90320",
+               .data = (void *)ucd90320
+       },
        {
                .compatible = "ti,ucd9090",
                .data = (void *)ucd9090
@@ -322,6 +330,9 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
        case ucd90160:
                data->gpio.ngpio = UCD901XX_NUM_GPIOS;
                break;
+       case ucd90320:
+               data->gpio.ngpio = UCD90320_NUM_GPIOS;
+               break;
        case ucd90910:
                data->gpio.ngpio = UCD90910_NUM_GPIOS;
                break;
@@ -372,17 +383,18 @@ static int ucd9000_debugfs_show_mfr_status_bit(void *data, u64 *val)
        struct ucd9000_debugfs_entry *entry = data;
        struct i2c_client *client = entry->client;
        u8 buffer[I2C_SMBUS_BLOCK_MAX];
-       int ret;
+       int ret, i;
 
        ret = ucd9000_get_mfr_status(client, buffer);
        if (ret < 0)
                return ret;
 
        /*
-        * Attribute only created for devices with gpi fault bits at bits
-        * 16-23, which is the second byte of the response.
+        * GPI fault bits come in sets of 8, the first set two bytes from the end of the response.
         */
-       *val = !!(buffer[1] & BIT(entry->index));
+       i = ret - 3 - entry->index / 8;
+       if (i >= 0)
+               *val = !!(buffer[i] & BIT(entry->index % 8));
 
        return 0;
 }
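
The rewritten indexing above places GPI fault bit N at
buffer[len - 3 - N / 8], bit N % 8, where len is the MFR_STATUS block length.
For a 4-byte response that puts bits 0-7 back in buffer[1], matching the old
hard-coded behavior. A sketch with worked numbers; the 8-byte length for a
UCD90320-style response is only illustrative, only the indexing rule comes
from the driver:

#include <stdio.h>

static void locate_gpi(int len, int index)
{
        int byte = len - 3 - index / 8;

        if (byte >= 0)
                printf("len %d, GPI %2d -> buffer[%d] bit %d\n",
                       len, index, byte, index % 8);
        else
                printf("len %d, GPI %2d -> not present\n", len, index);
}

int main(void)
{
        locate_gpi(4, 5);       /* buffer[1] bit 5: the old behavior */
        locate_gpi(8, 5);       /* longer response: buffer[5] bit 5 */
        locate_gpi(8, 31);      /* highest UCD90320 GPI: buffer[2] bit 7 */
        return 0;
}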
@@ -422,7 +434,7 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
 {
        struct dentry *debugfs;
        struct ucd9000_debugfs_entry *entries;
-       int i;
+       int i, gpi_count;
        char name[UCD9000_DEBUGFS_NAME_LEN];
 
        debugfs = pmbus_get_debugfs_dir(client);
@@ -435,18 +447,21 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
 
        /*
         * Of the chips this driver supports, only the UCD9090, UCD90160,
-        * and UCD90910 report GPI faults in their MFR_STATUS register, so only
-        * create the GPI fault debugfs attributes for those chips.
+        * UCD90320, and UCD90910 report GPI faults in their MFR_STATUS
+        * register, so only create the GPI fault debugfs attributes for those
+        * chips.
         */
        if (mid->driver_data == ucd9090 || mid->driver_data == ucd90160 ||
-           mid->driver_data == ucd90910) {
+           mid->driver_data == ucd90320 || mid->driver_data == ucd90910) {
+               gpi_count = mid->driver_data == ucd90320 ? UCD90320_GPI_COUNT
+                                                        : UCD9000_GPI_COUNT;
                entries = devm_kcalloc(&client->dev,
-                                      UCD9000_GPI_COUNT, sizeof(*entries),
+                                      gpi_count, sizeof(*entries),
                                       GFP_KERNEL);
                if (!entries)
                        return -ENOMEM;
 
-               for (i = 0; i < UCD9000_GPI_COUNT; i++) {
+               for (i = 0; i < gpi_count; i++) {
                        entries[i].client = client;
                        entries[i].index = i;
                        scnprintf(name, UCD9000_DEBUGFS_NAME_LEN,
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
new file mode 100644
index 0000000..3d47806
--- /dev/null
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for Infineon Multi-phase Digital VR Controllers
+ *
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define XDPE122_PROT_VR12_5MV          0x01 /* VR12.0 mode, 5-mV DAC */
+#define XDPE122_PROT_VR12_5_10MV       0x02 /* VR12.5 mode, 10-mV DAC */
+#define XDPE122_PROT_IMVP9_10MV                0x03 /* IMVP9 mode, 10-mV DAC */
+#define XDPE122_AMD_625MV              0x10 /* AMD mode, 6.25-mV DAC */
+#define XDPE122_PAGE_NUM               2
+
+static int xdpe122_identify(struct i2c_client *client,
+                           struct pmbus_driver_info *info)
+{
+       u8 vout_params;
+       int i, ret;
+
+       for (i = 0; i < XDPE122_PAGE_NUM; i++) {
+               /* Read the register with the VOUT scaling value. */
+               ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+               if (ret < 0)
+                       return ret;
+
+               vout_params = ret & GENMASK(4, 0);
+
+               switch (vout_params) {
+               case XDPE122_PROT_VR12_5_10MV:
+                       info->vrm_version[i] = vr13;
+                       break;
+               case XDPE122_PROT_VR12_5MV:
+                       info->vrm_version[i] = vr12;
+                       break;
+               case XDPE122_PROT_IMVP9_10MV:
+                       info->vrm_version[i] = imvp9;
+                       break;
+               case XDPE122_AMD_625MV:
+                       info->vrm_version[i] = amd625mv;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static struct pmbus_driver_info xdpe122_info = {
+       .pages = XDPE122_PAGE_NUM,
+       .format[PSC_VOLTAGE_IN] = linear,
+       .format[PSC_VOLTAGE_OUT] = vid,
+       .format[PSC_TEMPERATURE] = linear,
+       .format[PSC_CURRENT_IN] = linear,
+       .format[PSC_CURRENT_OUT] = linear,
+       .format[PSC_POWER] = linear,
+       .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+               PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+               PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+               PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+       .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+               PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+               PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+               PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+       .identify = xdpe122_identify,
+};
+
+static int xdpe122_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct pmbus_driver_info *info;
+
+       info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info),
+                           GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id xdpe122_id[] = {
+       {"xdpe12254", 0},
+       {"xdpe12284", 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(i2c, xdpe122_id);
+
+static const struct of_device_id __maybe_unused xdpe122_of_match[] = {
+       {.compatible = "infineon, xdpe12254"},
+       {.compatible = "infineon, xdpe12284"},
+       {}
+};
+MODULE_DEVICE_TABLE(of, xdpe122_of_match);
+
+static struct i2c_driver xdpe122_driver = {
+       .driver = {
+               .name = "xdpe12284",
+               .of_match_table = of_match_ptr(xdpe122_of_match),
+       },
+       .probe = xdpe122_probe,
+       .remove = pmbus_do_remove,
+       .id_table = xdpe122_id,
+};
+
+module_i2c_driver(xdpe122_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family");
+MODULE_LICENSE("GPL");
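
One design note on the probe above: xdpe122_identify() writes vrm_version[i]
into the driver info, so unlike the max20751 driver earlier (whose info is
never mutated and can stay a shared static), each client needs its own copy.
A sketch of the pattern with the rationale spelled out in comments; the body
mirrors the probe above, only the function name is invented:

#include <linux/i2c.h>
#include "pmbus.h"

static int example_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
{
        struct pmbus_driver_info *info;

        /*
         * Duplicate the template: identify() fills info->vrm_version[]
         * per probed chip, so two chips sharing one static struct would
         * clobber each other's VOUT interpretation. devm_ ties the
         * copy's lifetime to the client device.
         */
        info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info),
                            GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        return pmbus_do_probe(client, id, info);
}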
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 42ffd2e..30b7b3e 100644
@@ -390,8 +390,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int pwm_fan_suspend(struct device *dev)
+static int pwm_fan_disable(struct device *dev)
 {
        struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
        struct pwm_args args;
@@ -418,6 +417,17 @@ static int pwm_fan_suspend(struct device *dev)
        return 0;
 }
 
+static void pwm_fan_shutdown(struct platform_device *pdev)
+{
+       pwm_fan_disable(&pdev->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pwm_fan_suspend(struct device *dev)
+{
+       return pwm_fan_disable(dev);
+}
+
 static int pwm_fan_resume(struct device *dev)
 {
        struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
@@ -455,6 +465,7 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
 
 static struct platform_driver pwm_fan_driver = {
        .probe          = pwm_fan_probe,
+       .shutdown       = pwm_fan_shutdown,
        .driver = {
                .name           = "pwm-fan",
                .pm             = &pwm_fan_pm,
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index eb171d1..7ffadc2 100644
@@ -28,8 +28,6 @@
  *  w83627uhg    8      2       2       3      0xa230 0xc1    0x5ca3
  *  w83667hg     9      5       3       3      0xa510 0xc1    0x5ca3
  *  w83667hg-b   9      5       3       4      0xb350 0xc1    0x5ca3
- *  nct6775f     9      4       3       9      0xb470 0xc1    0x5ca3
- *  nct6776f     9      5       3       9      0xC330 0xc1    0x5ca3
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -50,7 +48,7 @@
 
 enum kinds {
        w83627ehf, w83627dhg, w83627dhg_p, w83627uhg,
-       w83667hg, w83667hg_b, nct6775, nct6776,
+       w83667hg, w83667hg_b,
 };
 
 /* used to set data->name = w83627ehf_device_names[data->sio_kind] */
@@ -61,18 +59,12 @@ static const char * const w83627ehf_device_names[] = {
        "w83627uhg",
        "w83667hg",
        "w83667hg",
-       "nct6775",
-       "nct6776",
 };
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-static unsigned short fan_debounce;
-module_param(fan_debounce, ushort, 0);
-MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
-
 #define DRVNAME "w83627ehf"
 
 /*
@@ -97,8 +89,6 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
 #define SIO_W83627UHG_ID       0xa230
 #define SIO_W83667HG_ID                0xa510
 #define SIO_W83667HG_B_ID      0xb350
-#define SIO_NCT6775_ID         0xb470
-#define SIO_NCT6776_ID         0xc330
 #define SIO_ID_MASK            0xFFF0
 
 static inline void
@@ -187,11 +177,6 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 };
 #define W83627EHF_REG_DIODE            0x59
 #define W83627EHF_REG_SMI_OVT          0x4C
 
-/* NCT6775F has its own fan divider registers */
-#define NCT6775_REG_FANDIV1            0x506
-#define NCT6775_REG_FANDIV2            0x507
-#define NCT6775_REG_FAN_DEBOUNCE       0xf0
-
 #define W83627EHF_REG_ALARM1           0x459
 #define W83627EHF_REG_ALARM2           0x45A
 #define W83627EHF_REG_ALARM3           0x45B
@@ -235,28 +220,6 @@ static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[]
 
 static const u16 W83627EHF_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 };
 
-static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 };
-static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 };
-static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 };
-static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 };
-static const u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 };
-static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 };
-static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
-static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
-static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
-static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642};
-
-static const u16 NCT6775_REG_TEMP[]
-       = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d };
-static const u16 NCT6775_REG_TEMP_CONFIG[]
-       = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A };
-static const u16 NCT6775_REG_TEMP_HYST[]
-       = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D };
-static const u16 NCT6775_REG_TEMP_OVER[]
-       = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C };
-static const u16 NCT6775_REG_TEMP_SOURCE[]
-       = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 };
-
 static const char *const w83667hg_b_temp_label[] = {
        "SYSTIN",
        "CPUTIN",
@@ -268,57 +231,7 @@ static const char *const w83667hg_b_temp_label[] = {
        "PECI Agent 4"
 };
 
-static const char *const nct6775_temp_label[] = {
-       "",
-       "SYSTIN",
-       "CPUTIN",
-       "AUXTIN",
-       "AMD SB-TSI",
-       "PECI Agent 0",
-       "PECI Agent 1",
-       "PECI Agent 2",
-       "PECI Agent 3",
-       "PECI Agent 4",
-       "PECI Agent 5",
-       "PECI Agent 6",
-       "PECI Agent 7",
-       "PCH_CHIP_CPU_MAX_TEMP",
-       "PCH_CHIP_TEMP",
-       "PCH_CPU_TEMP",
-       "PCH_MCH_TEMP",
-       "PCH_DIM0_TEMP",
-       "PCH_DIM1_TEMP",
-       "PCH_DIM2_TEMP",
-       "PCH_DIM3_TEMP"
-};
-
-static const char *const nct6776_temp_label[] = {
-       "",
-       "SYSTIN",
-       "CPUTIN",
-       "AUXTIN",
-       "SMBUSMASTER 0",
-       "SMBUSMASTER 1",
-       "SMBUSMASTER 2",
-       "SMBUSMASTER 3",
-       "SMBUSMASTER 4",
-       "SMBUSMASTER 5",
-       "SMBUSMASTER 6",
-       "SMBUSMASTER 7",
-       "PECI Agent 0",
-       "PECI Agent 1",
-       "PCH_CHIP_CPU_MAX_TEMP",
-       "PCH_CHIP_TEMP",
-       "PCH_CPU_TEMP",
-       "PCH_MCH_TEMP",
-       "PCH_DIM0_TEMP",
-       "PCH_DIM1_TEMP",
-       "PCH_DIM2_TEMP",
-       "PCH_DIM3_TEMP",
-       "BYTE_TEMP"
-};
-
-#define NUM_REG_TEMP   ARRAY_SIZE(NCT6775_REG_TEMP)
+#define NUM_REG_TEMP   ARRAY_SIZE(W83627EHF_REG_TEMP)
 
 static int is_word_sized(u16 reg)
 {
@@ -358,31 +271,6 @@ static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
        return 1350000U / (reg << divreg);
 }
 
-static unsigned int fan_from_reg13(u16 reg, unsigned int divreg)
-{
-       if ((reg & 0xff1f) == 0xff1f)
-               return 0;
-
-       reg = (reg & 0x1f) | ((reg & 0xff00) >> 3);
-
-       if (reg == 0)
-               return 0;
-
-       return 1350000U / reg;
-}
-
-static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
-{
-       if (reg == 0 || reg == 0xffff)
-               return 0;
-
-       /*
-        * Even though the registers are 16 bit wide, the fan divisor
-        * still applies.
-        */
-       return 1350000U / (reg << divreg);
-}
-
 static inline unsigned int
 div_from_reg(u8 reg)
 {
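
With the NCT6775/6776 register tables gone, fan readings always come from the
8-bit registers, so the conversion is the conventional Winbond formula
RPM = 1350000 / (count << divreg), and the update path bumps the divider when
the count saturates at 0xff. A standalone sketch with worked numbers; the
saturation guard mirrors the remaining fan_from_reg8():

#include <stdio.h>

static unsigned int fan_from_reg8(unsigned short reg, unsigned int divreg)
{
        if (reg == 0 || reg == 255)     /* 0xff means the count saturated */
                return 0;
        return 1350000U / (reg << divreg);
}

int main(void)
{
        printf("%u RPM\n", fan_from_reg8(100, 2));  /* 1350000/400 = 3375 */
        printf("%u RPM\n", fan_from_reg8(255, 2));  /* saturated -> 0 */
        return 0;
}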
@@ -418,7 +306,6 @@ struct w83627ehf_data {
        int addr;       /* IO base of hw monitor block */
        const char *name;
 
-       struct device *hwmon_dev;
        struct mutex lock;
 
        u16 reg_temp[NUM_REG_TEMP];
@@ -428,20 +315,10 @@ struct w83627ehf_data {
        u8 temp_src[NUM_REG_TEMP];
        const char * const *temp_label;
 
-       const u16 *REG_PWM;
-       const u16 *REG_TARGET;
-       const u16 *REG_FAN;
-       const u16 *REG_FAN_MIN;
-       const u16 *REG_FAN_START_OUTPUT;
-       const u16 *REG_FAN_STOP_OUTPUT;
-       const u16 *REG_FAN_STOP_TIME;
        const u16 *REG_FAN_MAX_OUTPUT;
        const u16 *REG_FAN_STEP_OUTPUT;
        const u16 *scale_in;
 
-       unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
-       unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
-
        struct mutex update_lock;
        char valid;             /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
@@ -457,7 +334,6 @@ struct w83627ehf_data {
        u8 fan_div[5];
        u8 has_fan;             /* some fan inputs can be disabled */
        u8 has_fan_min;         /* some fans don't have min register */
-       bool has_fan_div;
        u8 temp_type[3];
        s8 temp_offset[3];
        s16 temp[9];
@@ -494,6 +370,7 @@ struct w83627ehf_data {
        u16 have_temp_offset;
        u8 in6_skip:1;
        u8 temp3_val_only:1;
+       u8 have_vid:1;
 
 #ifdef CONFIG_PM
        /* Remember extra register values over suspend/resume */
@@ -583,35 +460,6 @@ static int w83627ehf_write_temp(struct w83627ehf_data *data, u16 reg,
        return w83627ehf_write_value(data, reg, value);
 }
 
-/* This function assumes that the caller holds data->update_lock */
-static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
-{
-       u8 reg;
-
-       switch (nr) {
-       case 0:
-               reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
-                   | (data->fan_div[0] & 0x7);
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
-               break;
-       case 1:
-               reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
-                   | ((data->fan_div[1] << 4) & 0x70);
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
-               break;
-       case 2:
-               reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
-                   | (data->fan_div[2] & 0x7);
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
-               break;
-       case 3:
-               reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
-                   | ((data->fan_div[3] << 4) & 0x70);
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
-               break;
-       }
-}
-
 /* This function assumes that the caller holds data->update_lock */
 static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
 {
@@ -663,32 +511,6 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
        }
 }
 
-static void w83627ehf_write_fan_div_common(struct device *dev,
-                                          struct w83627ehf_data *data, int nr)
-{
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
-       if (sio_data->kind == nct6776)
-               ; /* no dividers, do nothing */
-       else if (sio_data->kind == nct6775)
-               nct6775_write_fan_div(data, nr);
-       else
-               w83627ehf_write_fan_div(data, nr);
-}
-
-static void nct6775_update_fan_div(struct w83627ehf_data *data)
-{
-       u8 i;
-
-       i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
-       data->fan_div[0] = i & 0x7;
-       data->fan_div[1] = (i & 0x70) >> 4;
-       i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
-       data->fan_div[2] = i & 0x7;
-       if (data->has_fan & (1<<3))
-               data->fan_div[3] = (i & 0x70) >> 4;
-}
-
 static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
 {
        int i;
@@ -714,37 +536,6 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
        }
 }
 
-static void w83627ehf_update_fan_div_common(struct device *dev,
-                                           struct w83627ehf_data *data)
-{
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
-       if (sio_data->kind == nct6776)
-               ; /* no dividers, do nothing */
-       else if (sio_data->kind == nct6775)
-               nct6775_update_fan_div(data);
-       else
-               w83627ehf_update_fan_div(data);
-}
-
-static void nct6775_update_pwm(struct w83627ehf_data *data)
-{
-       int i;
-       int pwmcfg, fanmodecfg;
-
-       for (i = 0; i < data->pwm_num; i++) {
-               pwmcfg = w83627ehf_read_value(data,
-                                             W83627EHF_REG_PWM_ENABLE[i]);
-               fanmodecfg = w83627ehf_read_value(data,
-                                                 NCT6775_REG_FAN_MODE[i]);
-               data->pwm_mode[i] =
-                 ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
-               data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1;
-               data->tolerance[i] = fanmodecfg & 0x0f;
-               data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
-       }
-}
-
 static void w83627ehf_update_pwm(struct w83627ehf_data *data)
 {
        int i;
@@ -765,28 +556,15 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data)
                        ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
                data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
                                       & 3) + 1;
-               data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+               data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]);
 
                data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f;
        }
 }
 
-static void w83627ehf_update_pwm_common(struct device *dev,
-                                       struct w83627ehf_data *data)
-{
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
-       if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
-               nct6775_update_pwm(data);
-       else
-               w83627ehf_update_pwm(data);
-}
-
 static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
        int i;
 
        mutex_lock(&data->update_lock);
@@ -794,7 +572,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
        if (time_after(jiffies, data->last_updated + HZ + HZ/2)
         || !data->valid) {
                /* Fan clock dividers */
-               w83627ehf_update_fan_div_common(dev, data);
+               w83627ehf_update_fan_div(data);
 
                /* Measured voltages and limits */
                for (i = 0; i < data->in_num; i++) {
@@ -816,40 +594,36 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
                        if (!(data->has_fan & (1 << i)))
                                continue;
 
-                       reg = w83627ehf_read_value(data, data->REG_FAN[i]);
-                       data->rpm[i] = data->fan_from_reg(reg,
-                                                         data->fan_div[i]);
+                       reg = w83627ehf_read_value(data, W83627EHF_REG_FAN[i]);
+                       data->rpm[i] = fan_from_reg8(reg, data->fan_div[i]);
 
                        if (data->has_fan_min & (1 << i))
                                data->fan_min[i] = w83627ehf_read_value(data,
-                                          data->REG_FAN_MIN[i]);
+                                          W83627EHF_REG_FAN_MIN[i]);
 
                        /*
                         * If we failed to measure the fan speed and clock
                         * divider can be increased, let's try that for next
                         * time
                         */
-                       if (data->has_fan_div
-                           && (reg >= 0xff || (sio_data->kind == nct6775
-                                               && reg == 0x00))
-                           && data->fan_div[i] < 0x07) {
+                       if (reg >= 0xff && data->fan_div[i] < 0x07) {
                                dev_dbg(dev,
                                        "Increasing fan%d clock divider from %u to %u\n",
                                        i + 1, div_from_reg(data->fan_div[i]),
                                        div_from_reg(data->fan_div[i] + 1));
                                data->fan_div[i]++;
-                               w83627ehf_write_fan_div_common(dev, data, i);
+                               w83627ehf_write_fan_div(data, i);
                                /* Preserve min limit if possible */
                                if ((data->has_fan_min & (1 << i))
                                 && data->fan_min[i] >= 2
                                 && data->fan_min[i] != 255)
                                        w83627ehf_write_value(data,
-                                               data->REG_FAN_MIN[i],
+                                               W83627EHF_REG_FAN_MIN[i],
                                                (data->fan_min[i] /= 2));
                        }
                }
 
-               w83627ehf_update_pwm_common(dev, data);
+               w83627ehf_update_pwm(data);
 
                for (i = 0; i < data->pwm_num; i++) {
                        if (!(data->has_fan & (1 << i)))
@@ -857,13 +631,13 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
 
                        data->fan_start_output[i] =
                          w83627ehf_read_value(data,
-                                              data->REG_FAN_START_OUTPUT[i]);
+                                            W83627EHF_REG_FAN_START_OUTPUT[i]);
                        data->fan_stop_output[i] =
                          w83627ehf_read_value(data,
-                                              data->REG_FAN_STOP_OUTPUT[i]);
+                                            W83627EHF_REG_FAN_STOP_OUTPUT[i]);
                        data->fan_stop_time[i] =
                          w83627ehf_read_value(data,
-                                              data->REG_FAN_STOP_TIME[i]);
+                                              W83627EHF_REG_FAN_STOP_TIME[i]);
 
                        if (data->REG_FAN_MAX_OUTPUT &&
                            data->REG_FAN_MAX_OUTPUT[i] != 0xff)
@@ -879,7 +653,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
 
                        data->target_temp[i] =
                                w83627ehf_read_value(data,
-                                       data->REG_TARGET[i]) &
+                                       W83627EHF_REG_TARGET[i]) &
                                        (data->pwm_mode[i] == 1 ? 0x7f : 0xff);
                }
 
@@ -923,199 +697,61 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
        return data;
 }
 
-/*
- * Sysfs callback functions
- */
-#define show_in_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
-          char *buf) \
-{ \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
-       struct sensor_device_attribute *sensor_attr = \
-               to_sensor_dev_attr(attr); \
-       int nr = sensor_attr->index; \
-       return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \
-                      data->scale_in)); \
-}
-show_in_reg(in)
-show_in_reg(in_min)
-show_in_reg(in_max)
-
 #define store_in_reg(REG, reg) \
-static ssize_t \
-store_in_##reg(struct device *dev, struct device_attribute *attr, \
-              const char *buf, size_t count) \
+static int \
+store_in_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+              long val) \
 { \
-       struct w83627ehf_data *data = dev_get_drvdata(dev); \
-       struct sensor_device_attribute *sensor_attr = \
-               to_sensor_dev_attr(attr); \
-       int nr = sensor_attr->index; \
-       unsigned long val; \
-       int err; \
-       err = kstrtoul(buf, 10, &val); \
-       if (err < 0) \
-               return err; \
+       if (val < 0) \
+               return -EINVAL; \
        mutex_lock(&data->update_lock); \
-       data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \
-       w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
-                             data->in_##reg[nr]); \
+       data->in_##reg[channel] = in_to_reg(val, channel, data->scale_in); \
+       w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(channel), \
+                             data->in_##reg[channel]); \
        mutex_unlock(&data->update_lock); \
-       return count; \
+       return 0; \
 }
 
 store_in_reg(MIN, min)
 store_in_reg(MAX, max)
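
For readers tracing the new macro, store_in_reg(MIN, min) expands mechanically to the following (store_in_reg(MAX, max) is identical with MAX/max substituted):

	static int
	store_in_min(struct device *dev, struct w83627ehf_data *data, int channel,
		     long val)
	{
		if (val < 0)
			return -EINVAL;
		mutex_lock(&data->update_lock);
		data->in_min[channel] = in_to_reg(val, channel, data->scale_in);
		w83627ehf_write_value(data, W83627EHF_REG_IN_MIN(channel),
				      data->in_min[channel]);
		mutex_unlock(&data->update_lock);
		return 0;
	}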
 
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%u\n", (data->alarms >> nr) & 0x01);
-}
-
-static struct sensor_device_attribute sda_in_input[] = {
-       SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
-       SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
-       SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
-       SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3),
-       SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4),
-       SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5),
-       SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6),
-       SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7),
-       SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8),
-       SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9),
-};
-
-static struct sensor_device_attribute sda_in_alarm[] = {
-       SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0),
-       SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1),
-       SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2),
-       SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3),
-       SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8),
-       SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 21),
-       SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 20),
-       SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16),
-       SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17),
-       SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 19),
-};
-
-static struct sensor_device_attribute sda_in_min[] = {
-       SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
-       SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
-       SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
-       SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
-       SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
-       SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
-       SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
-       SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
-       SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
-       SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
-};
-
-static struct sensor_device_attribute sda_in_max[] = {
-       SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
-       SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
-       SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
-       SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
-       SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
-       SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
-       SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
-       SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
-       SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
-       SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
-};
-
-static ssize_t
-show_fan(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%d\n", data->rpm[nr]);
-}
-
-static ssize_t
-show_fan_min(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%d\n",
-                      data->fan_from_reg_min(data->fan_min[nr],
-                                             data->fan_div[nr]));
-}
-
-static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *attr,
-            char *buf)
-{
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr]));
-}
-
-static ssize_t
-store_fan_min(struct device *dev, struct device_attribute *attr,
-             const char *buf, size_t count)
+static int
+store_fan_min(struct device *dev, struct w83627ehf_data *data, int channel,
+             long val)
 {
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       unsigned long val;
-       int err;
        unsigned int reg;
        u8 new_div;
 
-       err = kstrtoul(buf, 10, &val);
-       if (err < 0)
-               return err;
+       if (val < 0)
+               return -EINVAL;
 
        mutex_lock(&data->update_lock);
-       if (!data->has_fan_div) {
-               /*
-                * Only NCT6776F for now, so we know that this is a 13 bit
-                * register
-                */
-               if (!val) {
-                       val = 0xff1f;
-               } else {
-                       if (val > 1350000U)
-                               val = 135000U;
-                       val = 1350000U / val;
-                       val = (val & 0x1f) | ((val << 3) & 0xff00);
-               }
-               data->fan_min[nr] = val;
-               goto done;      /* Leave fan divider alone */
-       }
        if (!val) {
                /* No min limit, alarm disabled */
-               data->fan_min[nr] = 255;
-               new_div = data->fan_div[nr]; /* No change */
-               dev_info(dev, "fan%u low limit and alarm disabled\n", nr + 1);
+               data->fan_min[channel] = 255;
+               new_div = data->fan_div[channel]; /* No change */
+               dev_info(dev, "fan%u low limit and alarm disabled\n",
+                        channel + 1);
        } else if ((reg = 1350000U / val) >= 128 * 255) {
                /*
                 * Speed below this value cannot possibly be represented,
                 * even with the highest divider (128)
                 */
-               data->fan_min[nr] = 254;
+               data->fan_min[channel] = 254;
                new_div = 7; /* 128 == (1 << 7) */
                dev_warn(dev,
                         "fan%u low limit %lu below minimum %u, set to minimum\n",
-                        nr + 1, val, data->fan_from_reg_min(254, 7));
+                        channel + 1, val, fan_from_reg8(254, 7));
        } else if (!reg) {
                /*
                 * Speed above this value cannot possibly be represented,
                 * even with the lowest divider (1)
                 */
-               data->fan_min[nr] = 1;
+               data->fan_min[channel] = 1;
                new_div = 0; /* 1 == (1 << 0) */
                dev_warn(dev,
                         "fan%u low limit %lu above maximum %u, set to maximum\n",
-                        nr + 1, val, data->fan_from_reg_min(1, 0));
+                        channel + 1, val, fan_from_reg8(1, 0));
        } else {
                /*
                 * Automatically pick the best divider, i.e. the one such
@@ -1127,390 +763,145 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
                        reg >>= 1;
                        new_div++;
                }
-               data->fan_min[nr] = reg;
+               data->fan_min[channel] = reg;
        }
 
        /*
         * Write both the fan clock divider (if it changed) and the new
         * fan min (unconditionally)
         */
-       if (new_div != data->fan_div[nr]) {
+       if (new_div != data->fan_div[channel]) {
                dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
-                       nr + 1, div_from_reg(data->fan_div[nr]),
+                       channel + 1, div_from_reg(data->fan_div[channel]),
                        div_from_reg(new_div));
-               data->fan_div[nr] = new_div;
-               w83627ehf_write_fan_div_common(dev, data, nr);
+               data->fan_div[channel] = new_div;
+               w83627ehf_write_fan_div(data, channel);
                /* Give the chip time to sample a new speed value */
                data->last_updated = jiffies;
        }
-done:
-       w83627ehf_write_value(data, data->REG_FAN_MIN[nr],
-                             data->fan_min[nr]);
-       mutex_unlock(&data->update_lock);
-
-       return count;
-}
-
-static struct sensor_device_attribute sda_fan_input[] = {
-       SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
-       SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
-       SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
-       SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
-       SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4),
-};
 
-static struct sensor_device_attribute sda_fan_alarm[] = {
-       SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6),
-       SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7),
-       SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11),
-       SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 10),
-       SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 23),
-};
-
-static struct sensor_device_attribute sda_fan_min[] = {
-       SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 0),
-       SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 1),
-       SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 2),
-       SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 3),
-       SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 4),
-};
-
-static struct sensor_device_attribute sda_fan_div[] = {
-       SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0),
-       SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1),
-       SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2),
-       SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3),
-       SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
-};
-
-static ssize_t
-show_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]);
-}
+       w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[channel],
+                             data->fan_min[channel]);
+       mutex_unlock(&data->update_lock);
 
-#define show_temp_reg(addr, reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
-          char *buf) \
-{ \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
-       struct sensor_device_attribute *sensor_attr = \
-               to_sensor_dev_attr(attr); \
-       int nr = sensor_attr->index; \
-       return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \
+       return 0;
 }
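
The three limit branches above pin down the conversion: the chip stores a fan low limit as an 8-bit count reg = 1350000 / rpm, pre-scaled by a clock divider 2^div with div <= 7. A standalone sketch of the whole mapping; the 96..192 target window for the auto-picked divider is inferred from the truncated comment above and should be read as an assumption:

	#include <stdint.h>

	/* Map a fan low limit in RPM to an (8-bit count, clock divider) pair. */
	static void fan_min_to_reg(unsigned long rpm, uint8_t *count, uint8_t *div)
	{
		unsigned int reg;

		if (rpm == 0) {			/* alarm disabled, count 255 */
			*count = 255;
			return;			/* clock divider left unchanged */
		}
		reg = 1350000U / rpm;
		if (reg >= 128 * 255) {		/* slower than divider 128 allows */
			*count = 254;
			*div = 7;		/* 128 == 1 << 7 */
		} else if (reg == 0) {		/* faster than divider 1 allows */
			*count = 1;
			*div = 0;		/* 1 == 1 << 0 */
		} else {
			*div = 0;
			while (reg > 192 && *div < 7) {	/* aim for 96..192 */
				reg >>= 1;
				(*div)++;
			}
			*count = (uint8_t)reg;
		}
	}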
-show_temp_reg(reg_temp, temp);
-show_temp_reg(reg_temp_over, temp_max);
-show_temp_reg(reg_temp_hyst, temp_max_hyst);
 
 #define store_temp_reg(addr, reg) \
-static ssize_t \
-store_##reg(struct device *dev, struct device_attribute *attr, \
-           const char *buf, size_t count) \
+static int \
+store_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+           long val) \
 { \
-       struct w83627ehf_data *data = dev_get_drvdata(dev); \
-       struct sensor_device_attribute *sensor_attr = \
-               to_sensor_dev_attr(attr); \
-       int nr = sensor_attr->index; \
-       int err; \
-       long val; \
-       err = kstrtol(buf, 10, &val); \
-       if (err < 0) \
-               return err; \
        mutex_lock(&data->update_lock); \
-       data->reg[nr] = LM75_TEMP_TO_REG(val); \
-       w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \
+       data->reg[channel] = LM75_TEMP_TO_REG(val); \
+       w83627ehf_write_temp(data, data->addr[channel], data->reg[channel]); \
        mutex_unlock(&data->update_lock); \
-       return count; \
+       return 0; \
 }
 store_temp_reg(reg_temp_over, temp_max);
 store_temp_reg(reg_temp_hyst, temp_max_hyst);
 
-static ssize_t
-show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+store_temp_offset(struct device *dev, struct w83627ehf_data *data, int channel,
+                 long val)
 {
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
 
-       return sprintf(buf, "%d\n",
-                      data->temp_offset[sensor_attr->index] * 1000);
+       mutex_lock(&data->update_lock);
+       data->temp_offset[channel] = val;
+       w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[channel], val);
+       mutex_unlock(&data->update_lock);
+       return 0;
 }
 
-static ssize_t
-store_temp_offset(struct device *dev, struct device_attribute *attr,
-                 const char *buf, size_t count)
+static int
+store_pwm_mode(struct device *dev, struct w83627ehf_data *data, int channel,
+              long val)
 {
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       long val;
-       int err;
-
-       err = kstrtol(buf, 10, &val);
-       if (err < 0)
-               return err;
+       u16 reg;
 
-       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+       if (val < 0 || val > 1)
+               return -EINVAL;
 
        mutex_lock(&data->update_lock);
-       data->temp_offset[nr] = val;
-       w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[nr], val);
+       reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[channel]);
+       data->pwm_mode[channel] = val;
+       reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]);
+       if (!val)
+               reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel];
+       w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], reg);
        mutex_unlock(&data->update_lock);
-       return count;
+       return 0;
 }
 
-static ssize_t
-show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+store_pwm(struct device *dev, struct w83627ehf_data *data, int channel,
+         long val)
 {
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%d\n", (int)data->temp_type[nr]);
-}
-
-static struct sensor_device_attribute sda_temp_input[] = {
-       SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0),
-       SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1),
-       SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2),
-       SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3),
-       SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4),
-       SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5),
-       SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6),
-       SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7),
-       SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_label[] = {
-       SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
-       SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
-       SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
-       SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
-       SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
-       SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
-       SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
-       SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
-       SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_max[] = {
-       SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 0),
-       SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 1),
-       SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 2),
-       SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 3),
-       SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 4),
-       SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 5),
-       SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 6),
-       SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 7),
-       SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 8),
-};
+       val = clamp_val(val, 0, 255);
 
-static struct sensor_device_attribute sda_temp_max_hyst[] = {
-       SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 0),
-       SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 1),
-       SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 2),
-       SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 3),
-       SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 4),
-       SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 5),
-       SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 6),
-       SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 7),
-       SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 8),
-};
+       mutex_lock(&data->update_lock);
+       data->pwm[channel] = val;
+       w83627ehf_write_value(data, W83627EHF_REG_PWM[channel], val);
+       mutex_unlock(&data->update_lock);
+       return 0;
+}
 
-static struct sensor_device_attribute sda_temp_alarm[] = {
-       SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4),
-       SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5),
-       SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
-};
+static int
+store_pwm_enable(struct device *dev, struct w83627ehf_data *data, int channel,
+                long val)
+{
+       u16 reg;
 
-static struct sensor_device_attribute sda_temp_type[] = {
-       SENSOR_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0),
-       SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1),
-       SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2),
-};
+       if (val < 1 || (val > 4 && val != data->pwm_enable_orig[channel]))
+               return -EINVAL;
 
-static struct sensor_device_attribute sda_temp_offset[] = {
-       SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 0),
-       SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 1),
-       SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 2),
-};
+       mutex_lock(&data->update_lock);
+       data->pwm_enable[channel] = val;
+       reg = w83627ehf_read_value(data,
+                                  W83627EHF_REG_PWM_ENABLE[channel]);
+       reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[channel]);
+       reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[channel];
+       w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel],
+                             reg);
+       mutex_unlock(&data->update_lock);
+       return 0;
+}
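
Both stores above are read-modify-write updates of the shared PWM_ENABLE registers: store_pwm_mode() toggles one bit (the hardware bit is inverted, set meaning DC mode, i.e. sysfs value 0), and store_pwm_enable() writes val - 1 into a 2-bit field. A generic sketch of the field update; put_field() is a hypothetical helper, not part of the driver:

	#include <stdint.h>

	/* Replace the masked field at 'shift' inside a register image. */
	static uint16_t put_field(uint16_t reg, unsigned int shift,
				  uint16_t mask, uint16_t val)
	{
		return (uint16_t)((reg & ~(mask << shift)) |
				  ((val & mask) << shift));
	}

	/* pwm_mode: bit set = DC, bit clear = PWM, so the bit stores !val:
	 *	reg = put_field(reg, W83627EHF_PWM_MODE_SHIFT[ch], 0x1, !val);
	 * pwm_enable: user modes 1..4 are stored as 0..3:
	 *	reg = put_field(reg, W83627EHF_PWM_ENABLE_SHIFT[ch], 0x3, val - 1);
	 */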
 
-#define show_pwm_reg(reg) \
+#define show_tol_temp(reg) \
 static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
-                         char *buf) \
+                               char *buf) \
 { \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+       struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
        struct sensor_device_attribute *sensor_attr = \
                to_sensor_dev_attr(attr); \
        int nr = sensor_attr->index; \
-       return sprintf(buf, "%d\n", data->reg[nr]); \
+       return sprintf(buf, "%d\n", data->reg[nr] * 1000); \
 }
 
-show_pwm_reg(pwm_mode)
-show_pwm_reg(pwm_enable)
-show_pwm_reg(pwm)
+show_tol_temp(tolerance)
+show_tol_temp(target_temp)
 
 static ssize_t
-store_pwm_mode(struct device *dev, struct device_attribute *attr,
+store_target_temp(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        int nr = sensor_attr->index;
-       unsigned long val;
+       long val;
        int err;
-       u16 reg;
 
-       err = kstrtoul(buf, 10, &val);
+       err = kstrtol(buf, 10, &val);
        if (err < 0)
                return err;
 
-       if (val > 1)
-               return -EINVAL;
-
-       /* On NCT67766F, DC mode is only supported for pwm1 */
-       if (sio_data->kind == nct6776 && nr && val != 1)
-               return -EINVAL;
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
 
        mutex_lock(&data->update_lock);
-       reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
-       data->pwm_mode[nr] = val;
-       reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]);
-       if (!val)
-               reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr];
-       w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
-       mutex_unlock(&data->update_lock);
-       return count;
-}
-
-static ssize_t
-store_pwm(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
-{
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       unsigned long val;
-       int err;
-
-       err = kstrtoul(buf, 10, &val);
-       if (err < 0)
-               return err;
-
-       val = clamp_val(val, 0, 255);
-
-       mutex_lock(&data->update_lock);
-       data->pwm[nr] = val;
-       w83627ehf_write_value(data, data->REG_PWM[nr], val);
-       mutex_unlock(&data->update_lock);
-       return count;
-}
-
-static ssize_t
-store_pwm_enable(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
-{
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       unsigned long val;
-       int err;
-       u16 reg;
-
-       err = kstrtoul(buf, 10, &val);
-       if (err < 0)
-               return err;
-
-       if (!val || (val > 4 && val != data->pwm_enable_orig[nr]))
-               return -EINVAL;
-       /* SmartFan III mode is not supported on NCT6776F */
-       if (sio_data->kind == nct6776 && val == 4)
-               return -EINVAL;
-
-       mutex_lock(&data->update_lock);
-       data->pwm_enable[nr] = val;
-       if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
-               reg = w83627ehf_read_value(data,
-                                          NCT6775_REG_FAN_MODE[nr]);
-               reg &= 0x0f;
-               reg |= (val - 1) << 4;
-               w83627ehf_write_value(data,
-                                     NCT6775_REG_FAN_MODE[nr], reg);
-       } else {
-               reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
-               reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
-               reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
-               w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
-       }
-       mutex_unlock(&data->update_lock);
-       return count;
-}
-
-
-#define show_tol_temp(reg) \
-static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
-                               char *buf) \
-{ \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
-       struct sensor_device_attribute *sensor_attr = \
-               to_sensor_dev_attr(attr); \
-       int nr = sensor_attr->index; \
-       return sprintf(buf, "%d\n", data->reg[nr] * 1000); \
-}
-
-show_tol_temp(tolerance)
-show_tol_temp(target_temp)
-
-static ssize_t
-store_target_temp(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
-{
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       long val;
-       int err;
-
-       err = kstrtol(buf, 10, &val);
-       if (err < 0)
-               return err;
-
-       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
-
-       mutex_lock(&data->update_lock);
-       data->target_temp[nr] = val;
-       w83627ehf_write_value(data, data->REG_TARGET[nr], val);
+       data->target_temp[nr] = val;
+       w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
        mutex_unlock(&data->update_lock);
        return count;
 }
@@ -1520,7 +911,6 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
        int nr = sensor_attr->index;
        u16 reg;
@@ -1535,76 +925,34 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
        val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
 
        mutex_lock(&data->update_lock);
-       if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
-               /* Limit tolerance further for NCT6776F */
-               if (sio_data->kind == nct6776 && val > 7)
-                       val = 7;
-               reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]);
+       reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
+       if (nr == 1)
+               reg = (reg & 0x0f) | (val << 4);
+       else
                reg = (reg & 0xf0) | val;
-               w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg);
-       } else {
-               reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
-               if (nr == 1)
-                       reg = (reg & 0x0f) | (val << 4);
-               else
-                       reg = (reg & 0xf0) | val;
-               w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
-       }
+       w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
        data->tolerance[nr] = val;
        mutex_unlock(&data->update_lock);
        return count;
 }
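
With the NCT-specific branch gone, the nibble layout is plain: channel 1 keeps its tolerance in the high nibble of its register, the other channels in the low nibble (val is clamped to 0..15 earlier in the function). The packing in isolation:

	/* Merge a 0..15 tolerance value into the shared register byte. */
	static unsigned char pack_tolerance(unsigned char reg, int nr,
					    unsigned char val)
	{
		val &= 0x0f;
		if (nr == 1)
			return (reg & 0x0f) | (val << 4);	/* high nibble */
		return (reg & 0xf0) | val;			/* low nibble  */
	}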
 
-static struct sensor_device_attribute sda_pwm[] = {
-       SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0),
-       SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1),
-       SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2),
-       SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_mode[] = {
-       SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                   store_pwm_mode, 0),
-       SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                   store_pwm_mode, 1),
-       SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                   store_pwm_mode, 2),
-       SENSOR_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                   store_pwm_mode, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_enable[] = {
-       SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                   store_pwm_enable, 0),
-       SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                   store_pwm_enable, 1),
-       SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                   store_pwm_enable, 2),
-       SENSOR_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                   store_pwm_enable, 3),
-};
-
-static struct sensor_device_attribute sda_target_temp[] = {
-       SENSOR_ATTR(pwm1_target, S_IWUSR | S_IRUGO, show_target_temp,
-                   store_target_temp, 0),
-       SENSOR_ATTR(pwm2_target, S_IWUSR | S_IRUGO, show_target_temp,
-                   store_target_temp, 1),
-       SENSOR_ATTR(pwm3_target, S_IWUSR | S_IRUGO, show_target_temp,
-                   store_target_temp, 2),
-       SENSOR_ATTR(pwm4_target, S_IWUSR | S_IRUGO, show_target_temp,
-                   store_target_temp, 3),
-};
-
-static struct sensor_device_attribute sda_tolerance[] = {
-       SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
-                   store_tolerance, 0),
-       SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
-                   store_tolerance, 1),
-       SENSOR_ATTR(pwm3_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
-                   store_tolerance, 2),
-       SENSOR_ATTR(pwm4_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
-                   store_tolerance, 3),
-};
+static SENSOR_DEVICE_ATTR(pwm1_target, 0644, show_target_temp,
+           store_target_temp, 0);
+static SENSOR_DEVICE_ATTR(pwm2_target, 0644, show_target_temp,
+           store_target_temp, 1);
+static SENSOR_DEVICE_ATTR(pwm3_target, 0644, show_target_temp,
+           store_target_temp, 2);
+static SENSOR_DEVICE_ATTR(pwm4_target, 0644, show_target_temp,
+           store_target_temp, 3);
+
+static SENSOR_DEVICE_ATTR(pwm1_tolerance, 0644, show_tolerance,
+           store_tolerance, 0);
+static SENSOR_DEVICE_ATTR(pwm2_tolerance, 0644, show_tolerance,
+           store_tolerance, 1);
+static SENSOR_DEVICE_ATTR(pwm3_tolerance, 0644, show_tolerance,
+           store_tolerance, 2);
+static SENSOR_DEVICE_ATTR(pwm4_tolerance, 0644, show_tolerance,
+           store_tolerance, 3);
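
Each SENSOR_DEVICE_ATTR() line replaces a slot in the old sda_* arrays; the macro declares the same structure the arrays held. For pwm1_target it comes out roughly as:

	static struct sensor_device_attribute sensor_dev_attr_pwm1_target = {
		.dev_attr = __ATTR(pwm1_target, 0644, show_target_temp,
				   store_target_temp),
		.index = 0,
	};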
 
 /* Smart Fan registers */
 
@@ -1612,7 +960,7 @@ static struct sensor_device_attribute sda_tolerance[] = {
 static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
                       char *buf) \
 { \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+       struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
        struct sensor_device_attribute *sensor_attr = \
                to_sensor_dev_attr(attr); \
        int nr = sensor_attr->index; \
@@ -1634,21 +982,21 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
        val = clamp_val(val, 1, 255); \
        mutex_lock(&data->update_lock); \
        data->reg[nr] = val; \
-       w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+       w83627ehf_write_value(data, REG[nr], val); \
        mutex_unlock(&data->update_lock); \
        return count; \
 }
 
-fan_functions(fan_start_output, FAN_START_OUTPUT)
-fan_functions(fan_stop_output, FAN_STOP_OUTPUT)
-fan_functions(fan_max_output, FAN_MAX_OUTPUT)
-fan_functions(fan_step_output, FAN_STEP_OUTPUT)
+fan_functions(fan_start_output, W83627EHF_REG_FAN_START_OUTPUT)
+fan_functions(fan_stop_output, W83627EHF_REG_FAN_STOP_OUTPUT)
+fan_functions(fan_max_output, data->REG_FAN_MAX_OUTPUT)
+fan_functions(fan_step_output, data->REG_FAN_STEP_OUTPUT)
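
Note the pasting trick in the instantiations above: the second macro argument is placed directly in front of [nr], so a file-scope table and a per-model data-> member both work. For fan_max_output the store path therefore expands to:

	data->fan_max_output[nr] = val;
	w83627ehf_write_value(data, data->REG_FAN_MAX_OUTPUT[nr], val);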
 
 #define fan_time_functions(reg, REG) \
 static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
                                char *buf) \
 { \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+       struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
        struct sensor_device_attribute *sensor_attr = \
                to_sensor_dev_attr(attr); \
        int nr = sensor_attr->index; \
@@ -1673,78 +1021,61 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
        val = step_time_to_reg(val, data->pwm_mode[nr]); \
        mutex_lock(&data->update_lock); \
        data->reg[nr] = val; \
-       w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+       w83627ehf_write_value(data, REG[nr], val); \
        mutex_unlock(&data->update_lock); \
        return count; \
 } \
 
-fan_time_functions(fan_stop_time, FAN_STOP_TIME)
-
-static ssize_t name_show(struct device *dev, struct device_attribute *attr,
-                        char *buf)
-{
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%s\n", data->name);
-}
-static DEVICE_ATTR_RO(name);
-
-static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
-       SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
-                   store_fan_stop_time, 3),
-       SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
-                   store_fan_start_output, 3),
-       SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
-                   store_fan_stop_output, 3),
-       SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
-                   store_fan_max_output, 3),
-       SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
-                   store_fan_step_output, 3),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays_fan3[] = {
-       SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
-                   store_fan_stop_time, 2),
-       SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
-                   store_fan_start_output, 2),
-       SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
-                   store_fan_stop_output, 2),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays[] = {
-       SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
-                   store_fan_stop_time, 0),
-       SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
-                   store_fan_stop_time, 1),
-       SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
-                   store_fan_start_output, 0),
-       SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
-                   store_fan_start_output, 1),
-       SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
-                   store_fan_stop_output, 0),
-       SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
-                   store_fan_stop_output, 1),
-};
+fan_time_functions(fan_stop_time, W83627EHF_REG_FAN_STOP_TIME)
+
+static SENSOR_DEVICE_ATTR(pwm4_stop_time, 0644, show_fan_stop_time,
+           store_fan_stop_time, 3);
+static SENSOR_DEVICE_ATTR(pwm4_start_output, 0644, show_fan_start_output,
+           store_fan_start_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_stop_output, 0644, show_fan_stop_output,
+           store_fan_stop_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_max_output, 0644, show_fan_max_output,
+           store_fan_max_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_step_output, 0644, show_fan_step_output,
+           store_fan_step_output, 3);
+
+static SENSOR_DEVICE_ATTR(pwm3_stop_time, 0644, show_fan_stop_time,
+           store_fan_stop_time, 2);
+static SENSOR_DEVICE_ATTR(pwm3_start_output, 0644, show_fan_start_output,
+           store_fan_start_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_stop_output, 0644, show_fan_stop_output,
+                   store_fan_stop_output, 2);
+
+static SENSOR_DEVICE_ATTR(pwm1_stop_time, 0644, show_fan_stop_time,
+           store_fan_stop_time, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_time, 0644, show_fan_stop_time,
+           store_fan_stop_time, 1);
+static SENSOR_DEVICE_ATTR(pwm1_start_output, 0644, show_fan_start_output,
+           store_fan_start_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_start_output, 0644, show_fan_start_output,
+           store_fan_start_output, 1);
+static SENSOR_DEVICE_ATTR(pwm1_stop_output, 0644, show_fan_stop_output,
+           store_fan_stop_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_output, 0644, show_fan_stop_output,
+           store_fan_stop_output, 1);
 
 
 /*
  * pwm1 and pwm3 don't support max and step settings on all chips.
  * Need to check support while generating/removing attribute files.
  */
-static struct sensor_device_attribute sda_sf3_max_step_arrays[] = {
-       SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
-                   store_fan_max_output, 0),
-       SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
-                   store_fan_step_output, 0),
-       SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
-                   store_fan_max_output, 1),
-       SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
-                   store_fan_step_output, 1),
-       SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
-                   store_fan_max_output, 2),
-       SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
-                   store_fan_step_output, 2),
-};
+static SENSOR_DEVICE_ATTR(pwm1_max_output, 0644, show_fan_max_output,
+           store_fan_max_output, 0);
+static SENSOR_DEVICE_ATTR(pwm1_step_output, 0644, show_fan_step_output,
+           store_fan_step_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_max_output, 0644, show_fan_max_output,
+           store_fan_max_output, 1);
+static SENSOR_DEVICE_ATTR(pwm2_step_output, 0644, show_fan_step_output,
+           store_fan_step_output, 1);
+static SENSOR_DEVICE_ATTR(pwm3_max_output, 0644, show_fan_max_output,
+           store_fan_max_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_step_output, 0644, show_fan_step_output,
+           store_fan_step_output, 2);
 
 static ssize_t
 cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1752,33 +1083,20 @@ cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct w83627ehf_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
 static DEVICE_ATTR_RO(cpu0_vid);
 
 
 /* Case open detection */
-
-static ssize_t
-show_caseopen(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel,
+              long val)
 {
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-
-       return sprintf(buf, "%d\n",
-               !!(data->caseopen & to_sensor_dev_attr_2(attr)->index));
-}
-
-static ssize_t
-clear_caseopen(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
-{
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       unsigned long val;
-       u16 reg, mask;
+       const u16 mask = 0x80;
+       u16 reg;
 
-       if (kstrtoul(buf, 10, &val) || val != 0)
+       if (val != 0 || channel != 0)
                return -EINVAL;
 
-       mask = to_sensor_dev_attr_2(attr)->nr;
-
        mutex_lock(&data->update_lock);
        reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR);
        w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask);
@@ -1786,85 +1104,116 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
        data->valid = 0;        /* Force cache refresh */
        mutex_unlock(&data->update_lock);
 
-       return count;
+       return 0;
 }
 
-static struct sensor_device_attribute_2 sda_caseopen[] = {
-       SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen,
-                       clear_caseopen, 0x80, 0x10),
-       SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen,
-                       clear_caseopen, 0x40, 0x40),
-};
-
-/*
- * Driver and device management
- */
-
-static void w83627ehf_device_remove_files(struct device *dev)
+static umode_t w83627ehf_attrs_visible(struct kobject *kobj,
+                                      struct attribute *a, int n)
 {
-       /*
-        * some entries in the following arrays may not have been used in
-        * device_create_file(), but device_remove_file() will ignore them
-        */
-       int i;
+       struct device *dev = container_of(kobj, struct device, kobj);
        struct w83627ehf_data *data = dev_get_drvdata(dev);
+       struct device_attribute *devattr;
+       struct sensor_device_attribute *sda;
+
+       devattr = container_of(a, struct device_attribute, attr);
+
+       /* Not a sensor attribute */
+       if (devattr->show == cpu0_vid_show && data->have_vid)
+               return a->mode;
+
+       sda = (struct sensor_device_attribute *)devattr;
+
+       if (sda->index < 2 &&
+               (devattr->show == show_fan_stop_time ||
+                devattr->show == show_fan_start_output ||
+                devattr->show == show_fan_stop_output))
+               return a->mode;
+
+       if (sda->index < 3 &&
+               (devattr->show == show_fan_max_output ||
+                devattr->show == show_fan_step_output) &&
+               data->REG_FAN_STEP_OUTPUT &&
+               data->REG_FAN_STEP_OUTPUT[sda->index] != 0xff)
+               return a->mode;
+
+       /* If fan3 and fan4 are enabled, create the files for them */
+       if (sda->index == 2 &&
+               (data->has_fan & (1 << 2)) && data->pwm_num >= 3 &&
+               (devattr->show == show_fan_stop_time ||
+                devattr->show == show_fan_start_output ||
+                devattr->show == show_fan_stop_output))
+               return a->mode;
+
+       if (sda->index == 3 &&
+               (data->has_fan & (1 << 3)) && data->pwm_num >= 4 &&
+               (devattr->show == show_fan_stop_time ||
+                devattr->show == show_fan_start_output ||
+                devattr->show == show_fan_stop_output ||
+                devattr->show == show_fan_max_output ||
+                devattr->show == show_fan_step_output))
+               return a->mode;
+
+       if ((devattr->show == show_target_temp ||
+           devattr->show == show_tolerance) &&
+           (data->has_fan & (1 << sda->index)) &&
+           sda->index < data->pwm_num)
+               return a->mode;
 
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
-               device_remove_file(dev, &sda_sf3_arrays[i].dev_attr);
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
-               struct sensor_device_attribute *attr =
-                 &sda_sf3_max_step_arrays[i];
-               if (data->REG_FAN_STEP_OUTPUT &&
-                   data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
-                       device_remove_file(dev, &attr->dev_attr);
-       }
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++)
-               device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr);
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
-               device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr);
-       for (i = 0; i < data->in_num; i++) {
-               if ((i == 6) && data->in6_skip)
-                       continue;
-               device_remove_file(dev, &sda_in_input[i].dev_attr);
-               device_remove_file(dev, &sda_in_alarm[i].dev_attr);
-               device_remove_file(dev, &sda_in_min[i].dev_attr);
-               device_remove_file(dev, &sda_in_max[i].dev_attr);
-       }
-       for (i = 0; i < 5; i++) {
-               device_remove_file(dev, &sda_fan_input[i].dev_attr);
-               device_remove_file(dev, &sda_fan_alarm[i].dev_attr);
-               device_remove_file(dev, &sda_fan_div[i].dev_attr);
-               device_remove_file(dev, &sda_fan_min[i].dev_attr);
-       }
-       for (i = 0; i < data->pwm_num; i++) {
-               device_remove_file(dev, &sda_pwm[i].dev_attr);
-               device_remove_file(dev, &sda_pwm_mode[i].dev_attr);
-               device_remove_file(dev, &sda_pwm_enable[i].dev_attr);
-               device_remove_file(dev, &sda_target_temp[i].dev_attr);
-               device_remove_file(dev, &sda_tolerance[i].dev_attr);
-       }
-       for (i = 0; i < NUM_REG_TEMP; i++) {
-               if (!(data->have_temp & (1 << i)))
-                       continue;
-               device_remove_file(dev, &sda_temp_input[i].dev_attr);
-               device_remove_file(dev, &sda_temp_label[i].dev_attr);
-               if (i == 2 && data->temp3_val_only)
-                       continue;
-               device_remove_file(dev, &sda_temp_max[i].dev_attr);
-               device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
-               if (i > 2)
-                       continue;
-               device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
-               device_remove_file(dev, &sda_temp_type[i].dev_attr);
-               device_remove_file(dev, &sda_temp_offset[i].dev_attr);
-       }
+       return 0;
+}
 
-       device_remove_file(dev, &sda_caseopen[0].dev_attr);
-       device_remove_file(dev, &sda_caseopen[1].dev_attr);
+/* These groups handle non-standard attributes used in this device */
+static struct attribute *w83627ehf_attrs[] = {
+       &sensor_dev_attr_pwm1_stop_time.dev_attr.attr,
+       &sensor_dev_attr_pwm1_start_output.dev_attr.attr,
+       &sensor_dev_attr_pwm1_stop_output.dev_attr.attr,
+       &sensor_dev_attr_pwm1_max_output.dev_attr.attr,
+       &sensor_dev_attr_pwm1_step_output.dev_attr.attr,
+       &sensor_dev_attr_pwm1_target.dev_attr.attr,
+       &sensor_dev_attr_pwm1_tolerance.dev_attr.attr,
+
+       &sensor_dev_attr_pwm2_stop_time.dev_attr.attr,
+       &sensor_dev_attr_pwm2_start_output.dev_attr.attr,
+       &sensor_dev_attr_pwm2_stop_output.dev_attr.attr,
+       &sensor_dev_attr_pwm2_max_output.dev_attr.attr,
+       &sensor_dev_attr_pwm2_step_output.dev_attr.attr,
+       &sensor_dev_attr_pwm2_target.dev_attr.attr,
+       &sensor_dev_attr_pwm2_tolerance.dev_attr.attr,
+
+       &sensor_dev_attr_pwm3_stop_time.dev_attr.attr,
+       &sensor_dev_attr_pwm3_start_output.dev_attr.attr,
+       &sensor_dev_attr_pwm3_stop_output.dev_attr.attr,
+       &sensor_dev_attr_pwm3_max_output.dev_attr.attr,
+       &sensor_dev_attr_pwm3_step_output.dev_attr.attr,
+       &sensor_dev_attr_pwm3_target.dev_attr.attr,
+       &sensor_dev_attr_pwm3_tolerance.dev_attr.attr,
+
+       &sensor_dev_attr_pwm4_stop_time.dev_attr.attr,
+       &sensor_dev_attr_pwm4_start_output.dev_attr.attr,
+       &sensor_dev_attr_pwm4_stop_output.dev_attr.attr,
+       &sensor_dev_attr_pwm4_max_output.dev_attr.attr,
+       &sensor_dev_attr_pwm4_step_output.dev_attr.attr,
+       &sensor_dev_attr_pwm4_target.dev_attr.attr,
+       &sensor_dev_attr_pwm4_tolerance.dev_attr.attr,
+
+       &dev_attr_cpu0_vid.attr,
+       NULL
+};
 
-       device_remove_file(dev, &dev_attr_name);
-       device_remove_file(dev, &dev_attr_cpu0_vid);
-}
+static const struct attribute_group w83627ehf_group = {
+       .attrs = w83627ehf_attrs,
+       .is_visible = w83627ehf_attrs_visible,
+};
+
+static const struct attribute_group *w83627ehf_groups[] = {
+       &w83627ehf_group,
+       NULL
+};
+
+/*
+ * Driver and device management
+ */
 
 /* Get the monitoring functions started */
 static inline void w83627ehf_init_device(struct w83627ehf_data *data,
@@ -1927,16 +1276,6 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data,
        }
 }
 
-static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
-                                  int r1, int r2)
-{
-       swap(data->temp_src[r1], data->temp_src[r2]);
-       swap(data->reg_temp[r1], data->reg_temp[r2]);
-       swap(data->reg_temp_over[r1], data->reg_temp_over[r2]);
-       swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]);
-       swap(data->reg_temp_config[r1], data->reg_temp_config[r2]);
-}
-
 static void
 w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp)
 {
@@ -1954,7 +1293,7 @@ static void
 w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
                           struct w83627ehf_data *data)
 {
-       int fan3pin, fan4pin, fan4min, fan5pin, regval;
+       int fan3pin, fan4pin, fan5pin, regval;
 
        /* The W83627UHG is simple, only two fan inputs, no config */
        if (sio_data->kind == w83627uhg) {
@@ -1964,77 +1303,392 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
        }
 
        /* fan4 and fan5 share some pins with the GPIO and serial flash */
-       if (sio_data->kind == nct6775) {
-               /* On NCT6775, fan4 shares pins with the fdc interface */
-               fan3pin = 1;
-               fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
-               fan4min = 0;
-               fan5pin = 0;
-       } else if (sio_data->kind == nct6776) {
-               bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
-
-               superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
-               regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
-
-               if (regval & 0x80)
-                       fan3pin = gpok;
-               else
-                       fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
-
-               if (regval & 0x40)
-                       fan4pin = gpok;
-               else
-                       fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
-
-               if (regval & 0x20)
-                       fan5pin = gpok;
-               else
-                       fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
-
-               fan4min = fan4pin;
-       } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+       if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
                fan3pin = 1;
                fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
                fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
-               fan4min = fan4pin;
        } else {
                fan3pin = 1;
                fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
                fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
-               fan4min = fan4pin;
        }
 
        data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
        data->has_fan |= (fan3pin << 2);
        data->has_fan_min |= (fan3pin << 2);
 
-       if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
-               /*
-                * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1
-                * register
-                */
-               data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
-               data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
-       } else {
-               /*
-                * It looks like fan4 and fan5 pins can be alternatively used
-                * as fan on/off switches, but fan5 control is write only :/
-                * We assume that if the serial interface is disabled, designers
-                * connected fan5 as input unless they are emitting log 1, which
-                * is not the default.
-                */
-               regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
-               if ((regval & (1 << 2)) && fan4pin) {
-                       data->has_fan |= (1 << 3);
-                       data->has_fan_min |= (1 << 3);
+       /*
+        * It looks like fan4 and fan5 pins can be alternatively used
+        * as fan on/off switches, but fan5 control is write only :/
+        * We assume that if the serial interface is disabled, designers
+        * connected fan5 as input unless they are emitting logic 1, which
+        * is not the default.
+        */
+       regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
+       if ((regval & (1 << 2)) && fan4pin) {
+               data->has_fan |= (1 << 3);
+               data->has_fan_min |= (1 << 3);
+       }
+       if (!(regval & (1 << 1)) && fan5pin) {
+               data->has_fan |= (1 << 4);
+               data->has_fan_min |= (1 << 4);
+       }
+}
+
+static umode_t
+w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+                    u32 attr, int channel)
+{
+       const struct w83627ehf_data *data = drvdata;
+
+       switch (type) {
+       case hwmon_temp:
+               /* channel 0.., name 1.. */
+               if (!(data->have_temp & (1 << channel)))
+                       return 0;
+               if (attr == hwmon_temp_input || attr == hwmon_temp_label)
+                       return 0444;
+               if (channel == 2 && data->temp3_val_only)
+                       return 0;
+               if (attr == hwmon_temp_max) {
+                       if (data->reg_temp_over[channel])
+                               return 0644;
+                       else
+                               return 0;
+               }
+               if (attr == hwmon_temp_max_hyst) {
+                       if (data->reg_temp_hyst[channel])
+                               return 0644;
+                       else
+                               return 0;
+               }
+               if (channel > 2)
+                       return 0;
+               if (attr == hwmon_temp_alarm || attr == hwmon_temp_type)
+                       return 0444;
+               if (attr == hwmon_temp_offset) {
+                       if (data->have_temp_offset & (1 << channel))
+                               return 0644;
+                       else
+                               return 0;
+               }
+               break;
+
+       case hwmon_fan:
+               /* channel 0.., name 1.. */
+               if (!(data->has_fan & (1 << channel)))
+                       return 0;
+               if (attr == hwmon_fan_input || attr == hwmon_fan_alarm)
+                       return 0444;
+               if (attr == hwmon_fan_div)
+                       return 0444;
-               if (!(regval & (1 << 1)) && fan5pin) {
-                       data->has_fan |= (1 << 4);
-                       data->has_fan_min |= (1 << 4);
+               if (attr == hwmon_fan_min) {
+                       if (data->has_fan_min & (1 << channel))
+                               return 0644;
+                       else
+                               return 0;
+               }
+               break;
+
+       case hwmon_in:
+               /* channel 0.., name 0.. */
+               if (channel >= data->in_num)
+                       return 0;
+               if (channel == 6 && data->in6_skip)
+                       return 0;
+               if (attr == hwmon_in_alarm || attr == hwmon_in_input)
+                       return 0444;
+               if (attr == hwmon_in_min || attr == hwmon_in_max)
+                       return 0644;
+               break;
+
+       case hwmon_pwm:
+               /* channel 0.., name 1.. */
+               if (!(data->has_fan & (1 << channel)) ||
+                   channel >= data->pwm_num)
+                       return 0;
+               if (attr == hwmon_pwm_mode || attr == hwmon_pwm_enable ||
+                   attr == hwmon_pwm_input)
+                       return 0644;
+               break;
+
+       case hwmon_intrusion:
+               return 0644;
+
+       default: /* Shouldn't happen */
+               return 0;
+       }
+
+       return 0; /* Shouldn't happen */
+}
+
+static int
+w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr,
+                      int channel, long *val)
+{
+       switch (attr) {
+       case hwmon_temp_input:
+               *val = LM75_TEMP_FROM_REG(data->temp[channel]);
+               return 0;
+       case hwmon_temp_max:
+               *val = LM75_TEMP_FROM_REG(data->temp_max[channel]);
+               return 0;
+       case hwmon_temp_max_hyst:
+               *val = LM75_TEMP_FROM_REG(data->temp_max_hyst[channel]);
+               return 0;
+       case hwmon_temp_offset:
+               *val = data->temp_offset[channel] * 1000;
+               return 0;
+       case hwmon_temp_type:
+               *val = (int)data->temp_type[channel];
+               return 0;
+       case hwmon_temp_alarm:
+               if (channel < 3) {
+                       int bit[] = { 4, 5, 13 };
+                       *val = (data->alarms >> bit[channel]) & 1;
+                       return 0;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr,
+                    int channel, long *val)
+{
+       switch (attr) {
+       case hwmon_in_input:
+               *val = in_from_reg(data->in[channel], channel, data->scale_in);
+               return 0;
+       case hwmon_in_min:
+               *val = in_from_reg(data->in_min[channel], channel,
+                                  data->scale_in);
+               return 0;
+       case hwmon_in_max:
+               *val = in_from_reg(data->in_max[channel], channel,
+                                  data->scale_in);
+               return 0;
+       case hwmon_in_alarm:
+               if (channel < 10) {
+                       int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 };
+                       *val = (data->alarms >> bit[channel]) & 1;
+                       return 0;
+               }
+               break;
+       default:
+               break;
+       }
+       return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr,
+                     int channel, long *val)
+{
+       switch (attr) {
+       case hwmon_fan_input:
+               *val = data->rpm[channel];
+               return 0;
+       case hwmon_fan_min:
+               *val = fan_from_reg8(data->fan_min[channel],
+                                    data->fan_div[channel]);
+               return 0;
+       case hwmon_fan_div:
+               *val = div_from_reg(data->fan_div[channel]);
+               return 0;
+       case hwmon_fan_alarm:
+               if (channel < 5) {
+                       int bit[] = { 6, 7, 11, 10, 23 };
+                       *val = (data->alarms >> bit[channel]) & 1;
+                       return 0;
                }
+               break;
+       default:
+               break;
        }
+       return -EOPNOTSUPP;
 }
 
+static int
+w83627ehf_do_read_pwm(struct w83627ehf_data *data, u32 attr,
+                     int channel, long *val)
+{
+       switch (attr) {
+       case hwmon_pwm_input:
+               *val = data->pwm[channel];
+               return 0;
+       case hwmon_pwm_enable:
+               *val = data->pwm_enable[channel];
+               return 0;
+       case hwmon_pwm_mode:
+               *val = data->pwm_enable[channel];
+               return 0;
+       default:
+               break;
+       }
+       return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_intrusion(struct w83627ehf_data *data, u32 attr,
+                           int channel, long *val)
+{
+       if (attr != hwmon_intrusion_alarm || channel != 0)
+               return -EOPNOTSUPP; /* shouldn't happen */
+
+       *val = !!(data->caseopen & 0x10);
+       return 0;
+}
+
+static int
+w83627ehf_read(struct device *dev, enum hwmon_sensor_types type,
+                       u32 attr, int channel, long *val)
+{
+       struct w83627ehf_data *data = w83627ehf_update_device(dev->parent);
+
+       switch (type) {
+       case hwmon_fan:
+               return w83627ehf_do_read_fan(data, attr, channel, val);
+
+       case hwmon_in:
+               return w83627ehf_do_read_in(data, attr, channel, val);
+
+       case hwmon_pwm:
+               return w83627ehf_do_read_pwm(data, attr, channel, val);
+
+       case hwmon_temp:
+               return w83627ehf_do_read_temp(data, attr, channel, val);
+
+       case hwmon_intrusion:
+               return w83627ehf_do_read_intrusion(data, attr, channel, val);
+
+       default:
+               break;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_read_string(struct device *dev, enum hwmon_sensor_types type,
+                     u32 attr, int channel, const char **str)
+{
+       struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+       switch (type) {
+       case hwmon_temp:
+               if (attr == hwmon_temp_label) {
+                       *str = data->temp_label[data->temp_src[channel]];
+                       return 0;
+               }
+               break;
+
+       default:
+               break;
+       }
+       /* Nothing else should be read as a string */
+       return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_write(struct device *dev, enum hwmon_sensor_types type,
+                       u32 attr, int channel, long val)
+{
+       struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+       if (type == hwmon_in && attr == hwmon_in_min)
+               return store_in_min(dev, data, channel, val);
+       if (type == hwmon_in && attr == hwmon_in_max)
+               return store_in_max(dev, data, channel, val);
+
+       if (type == hwmon_fan && attr == hwmon_fan_min)
+               return store_fan_min(dev, data, channel, val);
+
+       if (type == hwmon_temp && attr == hwmon_temp_max)
+               return store_temp_max(dev, data, channel, val);
+       if (type == hwmon_temp && attr == hwmon_temp_max_hyst)
+               return store_temp_max_hyst(dev, data, channel, val);
+       if (type == hwmon_temp && attr == hwmon_temp_offset)
+               return store_temp_offset(dev, data, channel, val);
+
+       if (type == hwmon_pwm && attr == hwmon_pwm_mode)
+               return store_pwm_mode(dev, data, channel, val);
+       if (type == hwmon_pwm && attr == hwmon_pwm_enable)
+               return store_pwm_enable(dev, data, channel, val);
+       if (type == hwmon_pwm && attr == hwmon_pwm_input)
+               return store_pwm(dev, data, channel, val);
+
+       if (type == hwmon_intrusion && attr == hwmon_intrusion_alarm)
+               return clear_caseopen(dev, data, channel, val);
+
+       return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops w83627ehf_ops = {
+       .is_visible = w83627ehf_is_visible,
+       .read = w83627ehf_read,
+       .read_string = w83627ehf_read_string,
+       .write = w83627ehf_write,
+};
+
+static const struct hwmon_channel_info *w83627ehf_info[] = {
+       HWMON_CHANNEL_INFO(fan,
+               HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+               HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+               HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+               HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+               HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN),
+       HWMON_CHANNEL_INFO(in,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN),
+       HWMON_CHANNEL_INFO(pwm,
+               HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+               HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+               HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+               HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE),
+       HWMON_CHANNEL_INFO(temp,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE),
+       HWMON_CHANNEL_INFO(intrusion,
+               HWMON_INTRUSION_ALARM),
+       NULL
+};
+
+static const struct hwmon_chip_info w83627ehf_chip_info = {
+       .ops = &w83627ehf_ops,
+       .info = w83627ehf_info,
+};
+
 static int w83627ehf_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
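
For orientation, the large block above is the standard hwmon-with-info pattern: an is_visible callback gates each attribute, read/write callbacks service them, and a hwmon_chip_info ties the ops to the per-type channel tables. A minimal sketch under hypothetical names (a stand-in "foo" driver with a single temperature channel, not the w83627ehf's real layout):

    #include <linux/hwmon.h>

    static umode_t foo_is_visible(const void *drvdata,
                                  enum hwmon_sensor_types type,
                                  u32 attr, int channel)
    {
            /* 0 hides an attribute; 0444/0644 expose it RO/RW */
            if (type == hwmon_temp && attr == hwmon_temp_input)
                    return 0444;
            return 0;
    }

    static int foo_read(struct device *dev, enum hwmon_sensor_types type,
                        u32 attr, int channel, long *val)
    {
            *val = 42000;   /* placeholder reading, millidegrees Celsius */
            return 0;
    }

    static const struct hwmon_ops foo_hwmon_ops = {
            .is_visible = foo_is_visible,
            .read = foo_read,
    };

    static const struct hwmon_channel_info *foo_info[] = {
            HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
            NULL
    };

    static const struct hwmon_chip_info foo_chip_info = {
            .ops = &foo_hwmon_ops,
            .info = foo_info,
    };

    /* in probe, mirroring the registration above: */
    hwmon_dev = devm_hwmon_device_register_with_info(dev, "foo", data,
                                                     &foo_chip_info, NULL);
    return PTR_ERR_OR_ZERO(hwmon_dev);
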
@@ -2043,6 +1697,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
        struct resource *res;
        u8 en_vrm10;
        int i, err = 0;
+       struct device *hwmon_dev;
 
        res = platform_get_resource(pdev, IORESOURCE_IO, 0);
        if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) {
@@ -2069,15 +1724,13 @@ static int w83627ehf_probe(struct platform_device *pdev)
 
        /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
        data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
-       /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */
+       /* 667HG has 3 pwms, and 627UHG has only 2 */
        switch (sio_data->kind) {
        default:
                data->pwm_num = 4;
                break;
        case w83667hg:
        case w83667hg_b:
-       case nct6775:
-       case nct6776:
                data->pwm_num = 3;
                break;
        case w83627uhg:
@@ -2089,83 +1742,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
        data->have_temp = 0x07;
 
        /* Deal with temperature register setup first. */
-       if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
-               int mask = 0;
-
-               /*
-                * Display temperature sensor output only if it monitors
-                * a source other than one already reported. Always display
-                * first three temperature registers, though.
-                */
-               for (i = 0; i < NUM_REG_TEMP; i++) {
-                       u8 src;
-
-                       data->reg_temp[i] = NCT6775_REG_TEMP[i];
-                       data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i];
-                       data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i];
-                       data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i];
-
-                       src = w83627ehf_read_value(data,
-                                                  NCT6775_REG_TEMP_SOURCE[i]);
-                       src &= 0x1f;
-                       if (src && !(mask & (1 << src))) {
-                               data->have_temp |= 1 << i;
-                               mask |= 1 << src;
-                       }
-
-                       data->temp_src[i] = src;
-
-                       /*
-                        * Now do some register swapping if index 0..2 don't
-                        * point to SYSTIN(1), CPUIN(2), and AUXIN(3).
-                        * Idea is to have the first three attributes
-                        * report SYSTIN, CPUIN, and AUXIN if possible
-                        * without overriding the basic system configuration.
-                        */
-                       if (i > 0 && data->temp_src[0] != 1
-                           && data->temp_src[i] == 1)
-                               w82627ehf_swap_tempreg(data, 0, i);
-                       if (i > 1 && data->temp_src[1] != 2
-                           && data->temp_src[i] == 2)
-                               w82627ehf_swap_tempreg(data, 1, i);
-                       if (i > 2 && data->temp_src[2] != 3
-                           && data->temp_src[i] == 3)
-                               w82627ehf_swap_tempreg(data, 2, i);
-               }
-               if (sio_data->kind == nct6776) {
-                       /*
-                        * On NCT6776, AUXTIN and VIN3 pins are shared.
-                        * Only way to detect it is to check if AUXTIN is used
-                        * as a temperature source, and if that source is
-                        * enabled.
-                        *
-                        * If that is the case, disable in6, which reports VIN3.
-                        * Otherwise disable temp3.
-                        */
-                       if (data->temp_src[2] == 3) {
-                               u8 reg;
-
-                               if (data->reg_temp_config[2])
-                                       reg = w83627ehf_read_value(data,
-                                               data->reg_temp_config[2]);
-                               else
-                                       reg = 0; /* Assume AUXTIN is used */
-
-                               if (reg & 0x01)
-                                       data->have_temp &= ~(1 << 2);
-                               else
-                                       data->in6_skip = 1;
-                       }
-                       data->temp_label = nct6776_temp_label;
-               } else {
-                       data->temp_label = nct6775_temp_label;
-               }
-               data->have_temp_offset = data->have_temp & 0x07;
-               for (i = 0; i < 3; i++) {
-                       if (data->temp_src[i] > 3)
-                               data->have_temp_offset &= ~(1 << i);
-               }
-       } else if (sio_data->kind == w83667hg_b) {
+       if (sio_data->kind == w83667hg_b) {
                u8 reg;
 
                w83627ehf_set_temp_reg_ehf(data, 4);
@@ -2275,56 +1852,12 @@ static int w83627ehf_probe(struct platform_device *pdev)
                data->have_temp_offset = data->have_temp & 0x07;
        }
 
-       if (sio_data->kind == nct6775) {
-               data->has_fan_div = true;
-               data->fan_from_reg = fan_from_reg16;
-               data->fan_from_reg_min = fan_from_reg8;
-               data->REG_PWM = NCT6775_REG_PWM;
-               data->REG_TARGET = NCT6775_REG_TARGET;
-               data->REG_FAN = NCT6775_REG_FAN;
-               data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
-               data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
-               data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
-               data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
-               data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT;
-               data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT;
-       } else if (sio_data->kind == nct6776) {
-               data->has_fan_div = false;
-               data->fan_from_reg = fan_from_reg13;
-               data->fan_from_reg_min = fan_from_reg13;
-               data->REG_PWM = NCT6775_REG_PWM;
-               data->REG_TARGET = NCT6775_REG_TARGET;
-               data->REG_FAN = NCT6775_REG_FAN;
-               data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
-               data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
-               data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
-               data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
-       } else if (sio_data->kind == w83667hg_b) {
-               data->has_fan_div = true;
-               data->fan_from_reg = fan_from_reg8;
-               data->fan_from_reg_min = fan_from_reg8;
-               data->REG_PWM = W83627EHF_REG_PWM;
-               data->REG_TARGET = W83627EHF_REG_TARGET;
-               data->REG_FAN = W83627EHF_REG_FAN;
-               data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
-               data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
-               data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
-               data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
+       if (sio_data->kind == w83667hg_b) {
                data->REG_FAN_MAX_OUTPUT =
                  W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B;
                data->REG_FAN_STEP_OUTPUT =
                  W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B;
        } else {
-               data->has_fan_div = true;
-               data->fan_from_reg = fan_from_reg8;
-               data->fan_from_reg_min = fan_from_reg8;
-               data->REG_PWM = W83627EHF_REG_PWM;
-               data->REG_TARGET = W83627EHF_REG_TARGET;
-               data->REG_FAN = W83627EHF_REG_FAN;
-               data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
-               data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
-               data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
-               data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
                data->REG_FAN_MAX_OUTPUT =
                  W83627EHF_REG_FAN_MAX_OUTPUT_COMMON;
                data->REG_FAN_STEP_OUTPUT =
@@ -2347,8 +1880,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
                goto exit_release;
 
        /* Read VID value */
-       if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b ||
-           sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+       if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
                /*
                 * W83667HG has different pins for VID input and output, so
                 * we can get the VID input values directly at logical device D
@@ -2356,11 +1888,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
                 */
                superio_select(sio_data->sioreg, W83667HG_LD_VID);
                data->vid = superio_inb(sio_data->sioreg, 0xe3);
-               err = device_create_file(dev, &dev_attr_cpu0_vid);
-               if (err) {
-                       superio_exit(sio_data->sioreg);
-                       goto exit_release;
-               }
+               data->have_vid = true;
        } else if (sio_data->kind != w83627uhg) {
                superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
                if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) {
@@ -2394,190 +1922,33 @@ static int w83627ehf_probe(struct platform_device *pdev)
                                                SIO_REG_VID_DATA);
                        if (sio_data->kind == w83627ehf) /* 6 VID pins only */
                                data->vid &= 0x3f;
-
-                       err = device_create_file(dev, &dev_attr_cpu0_vid);
-                       if (err) {
-                               superio_exit(sio_data->sioreg);
-                               goto exit_release;
-                       }
+                       data->have_vid = true;
                } else {
                        dev_info(dev,
                                 "VID pins in output mode, CPU VID not available\n");
                }
        }
 
-       if (fan_debounce &&
-           (sio_data->kind == nct6775 || sio_data->kind == nct6776)) {
-               u8 tmp;
-
-               superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
-               tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE);
-               if (sio_data->kind == nct6776)
-                       superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
-                                    0x3e | tmp);
-               else
-                       superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
-                                    0x1e | tmp);
-               pr_info("Enabled fan debounce for chip %s\n", data->name);
-       }
-
        w83627ehf_check_fan_inputs(sio_data, data);
 
        superio_exit(sio_data->sioreg);
 
        /* Read fan clock dividers immediately */
-       w83627ehf_update_fan_div_common(dev, data);
+       w83627ehf_update_fan_div(data);
 
        /* Read pwm data to save original values */
-       w83627ehf_update_pwm_common(dev, data);
+       w83627ehf_update_pwm(data);
        for (i = 0; i < data->pwm_num; i++)
                data->pwm_enable_orig[i] = data->pwm_enable[i];
 
-       /* Register sysfs hooks */
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
-               err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
-               if (err)
-                       goto exit_remove;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
-               struct sensor_device_attribute *attr =
-                 &sda_sf3_max_step_arrays[i];
-               if (data->REG_FAN_STEP_OUTPUT &&
-                   data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
-                       err = device_create_file(dev, &attr->dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-       }
-       /* if fan3 and fan4 are enabled create the sf3 files for them */
-       if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3)
-               for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) {
-                       err = device_create_file(dev,
-                                       &sda_sf3_arrays_fan3[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-       if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
-               for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
-                       err = device_create_file(dev,
-                                       &sda_sf3_arrays_fan4[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-
-       for (i = 0; i < data->in_num; i++) {
-               if ((i == 6) && data->in6_skip)
-                       continue;
-               if ((err = device_create_file(dev, &sda_in_input[i].dev_attr))
-                       || (err = device_create_file(dev,
-                               &sda_in_alarm[i].dev_attr))
-                       || (err = device_create_file(dev,
-                               &sda_in_min[i].dev_attr))
-                       || (err = device_create_file(dev,
-                               &sda_in_max[i].dev_attr)))
-                       goto exit_remove;
-       }
-
-       for (i = 0; i < 5; i++) {
-               if (data->has_fan & (1 << i)) {
-                       if ((err = device_create_file(dev,
-                                       &sda_fan_input[i].dev_attr))
-                               || (err = device_create_file(dev,
-                                       &sda_fan_alarm[i].dev_attr)))
-                               goto exit_remove;
-                       if (sio_data->kind != nct6776) {
-                               err = device_create_file(dev,
-                                               &sda_fan_div[i].dev_attr);
-                               if (err)
-                                       goto exit_remove;
-                       }
-                       if (data->has_fan_min & (1 << i)) {
-                               err = device_create_file(dev,
-                                               &sda_fan_min[i].dev_attr);
-                               if (err)
-                                       goto exit_remove;
-                       }
-                       if (i < data->pwm_num &&
-                               ((err = device_create_file(dev,
-                                       &sda_pwm[i].dev_attr))
-                               || (err = device_create_file(dev,
-                                       &sda_pwm_mode[i].dev_attr))
-                               || (err = device_create_file(dev,
-                                       &sda_pwm_enable[i].dev_attr))
-                               || (err = device_create_file(dev,
-                                       &sda_target_temp[i].dev_attr))
-                               || (err = device_create_file(dev,
-                                       &sda_tolerance[i].dev_attr))))
-                               goto exit_remove;
-               }
-       }
-
-       for (i = 0; i < NUM_REG_TEMP; i++) {
-               if (!(data->have_temp & (1 << i)))
-                       continue;
-               err = device_create_file(dev, &sda_temp_input[i].dev_attr);
-               if (err)
-                       goto exit_remove;
-               if (data->temp_label) {
-                       err = device_create_file(dev,
-                                                &sda_temp_label[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (i == 2 && data->temp3_val_only)
-                       continue;
-               if (data->reg_temp_over[i]) {
-                       err = device_create_file(dev,
-                               &sda_temp_max[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (data->reg_temp_hyst[i]) {
-                       err = device_create_file(dev,
-                               &sda_temp_max_hyst[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (i > 2)
-                       continue;
-               if ((err = device_create_file(dev,
-                               &sda_temp_alarm[i].dev_attr))
-                       || (err = device_create_file(dev,
-                               &sda_temp_type[i].dev_attr)))
-                       goto exit_remove;
-               if (data->have_temp_offset & (1 << i)) {
-                       err = device_create_file(dev,
-                                                &sda_temp_offset[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-       }
-
-       err = device_create_file(dev, &sda_caseopen[0].dev_attr);
-       if (err)
-               goto exit_remove;
-
-       if (sio_data->kind == nct6776) {
-               err = device_create_file(dev, &sda_caseopen[1].dev_attr);
-               if (err)
-                       goto exit_remove;
-       }
+       hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+                                                        data->name,
+                                                        data,
+                                                        &w83627ehf_chip_info,
+                                                        w83627ehf_groups);
 
-       err = device_create_file(dev, &dev_attr_name);
-       if (err)
-               goto exit_remove;
-
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove;
-       }
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 
-       return 0;
-
-exit_remove:
-       w83627ehf_device_remove_files(dev);
 exit_release:
        release_region(res->start, IOREGION_LENGTH);
 exit:
@@ -2588,8 +1959,6 @@ static int w83627ehf_remove(struct platform_device *pdev)
 {
        struct w83627ehf_data *data = platform_get_drvdata(pdev);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       w83627ehf_device_remove_files(&pdev->dev);
        release_region(data->addr, IOREGION_LENGTH);
 
        return 0;
@@ -2599,14 +1968,9 @@ static int w83627ehf_remove(struct platform_device *pdev)
 static int w83627ehf_suspend(struct device *dev)
 {
        struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
 
        mutex_lock(&data->update_lock);
        data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
-       if (sio_data->kind == nct6775) {
-               data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
-               data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
-       }
        mutex_unlock(&data->update_lock);
 
        return 0;
@@ -2615,7 +1979,6 @@ static int w83627ehf_suspend(struct device *dev)
 static int w83627ehf_resume(struct device *dev)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        int i;
 
        mutex_lock(&data->update_lock);
@@ -2636,7 +1999,7 @@ static int w83627ehf_resume(struct device *dev)
                if (!(data->has_fan_min & (1 << i)))
                        continue;
 
-               w83627ehf_write_value(data, data->REG_FAN_MIN[i],
+               w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[i],
                                      data->fan_min[i]);
        }
 
@@ -2660,10 +2023,6 @@ static int w83627ehf_resume(struct device *dev)
 
        /* Restore other settings */
        w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
-       if (sio_data->kind == nct6775) {
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
-       }
 
        /* Force re-reading all values */
        data->valid = 0;
@@ -2704,8 +2063,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
        static const char sio_name_W83627UHG[] __initconst = "W83627UHG";
        static const char sio_name_W83667HG[] __initconst = "W83667HG";
        static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B";
-       static const char sio_name_NCT6775[] __initconst = "NCT6775F";
-       static const char sio_name_NCT6776[] __initconst = "NCT6776F";
 
        u16 val;
        const char *sio_name;
@@ -2749,14 +2106,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
                sio_data->kind = w83667hg_b;
                sio_name = sio_name_W83667HG_B;
                break;
-       case SIO_NCT6775_ID:
-               sio_data->kind = nct6775;
-               sio_name = sio_name_NCT6775;
-               break;
-       case SIO_NCT6776_ID:
-               sio_data->kind = nct6776;
-               sio_name = sio_name_NCT6776;
-               break;
        default:
                if (val != 0xffff)
                        pr_debug("unsupported chip ID: 0x%04x\n", val);
index 0436916..7f8f896 100644 (file)
@@ -527,8 +527,8 @@ static const struct device_type i3c_masterdev_type = {
        .groups = i3c_masterdev_groups,
 };
 
-int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
-                    unsigned long max_i2c_scl_rate)
+static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+                           unsigned long max_i2c_scl_rate)
 {
        struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
 
index b0ff0e1..bd26c3b 100644 (file)
@@ -899,6 +899,22 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
        struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct dw_i3c_master *master = to_dw_i3c_master(m);
+       int pos;
+
+       pos = dw_i3c_master_get_free_pos(master);
+
+       if (data->index > pos && pos > 0) {
+               writel(0,
+                      master->regs +
+                      DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+               master->addrs[data->index] = 0;
+               master->free_pos |= BIT(data->index);
+
+               data->index = pos;
+               master->addrs[pos] = dev->info.dyn_addr;
+               master->free_pos &= ~BIT(pos);
+       }
 
        writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
               master->regs +
@@ -1100,15 +1116,13 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
 static int dw_i3c_probe(struct platform_device *pdev)
 {
        struct dw_i3c_master *master;
-       struct resource *res;
        int ret, irq;
 
        master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
        if (!master)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       master->regs = devm_ioremap_resource(&pdev->dev, res);
+       master->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(master->regs))
                return PTR_ERR(master->regs);
 
index 10db0bf..5471279 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
+#include <linux/of_device.h>
 
 #define DEV_ID                         0x0
 #define DEV_ID_I3C_MASTER              0x5034
@@ -60,6 +61,7 @@
 #define CTRL_HALT_EN                   BIT(30)
 #define CTRL_MCS                       BIT(29)
 #define CTRL_MCS_EN                    BIT(28)
+#define CTRL_THD_DELAY(x)              (((x) << 24) & GENMASK(25, 24))
 #define CTRL_HJ_DISEC                  BIT(8)
 #define CTRL_MST_ACK                   BIT(7)
 #define CTRL_HJ_ACK                    BIT(6)
@@ -70,6 +72,7 @@
 #define CTRL_MIXED_FAST_BUS_MODE       2
 #define CTRL_MIXED_SLOW_BUS_MODE       3
 #define CTRL_BUS_MODE_MASK             GENMASK(1, 0)
+#define THD_DELAY_MAX                  3
 
 #define PRESCL_CTRL0                   0x14
 #define PRESCL_CTRL0_I2C(x)            ((x) << 16)
@@ -388,6 +391,10 @@ struct cdns_i3c_xfer {
        struct cdns_i3c_cmd cmds[0];
 };
 
+struct cdns_i3c_data {
+       u8 thd_delay_ns;
+};
+
 struct cdns_i3c_master {
        struct work_struct hj_work;
        struct i3c_master_controller base;
@@ -408,6 +415,7 @@ struct cdns_i3c_master {
        struct clk *pclk;
        struct cdns_i3c_master_caps caps;
        unsigned long i3c_scl_lim;
+       const struct cdns_i3c_data *devdata;
 };
 
 static inline struct cdns_i3c_master *
@@ -1181,6 +1189,20 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
        return 0;
 }
 
+static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
+{
+       unsigned long sysclk_rate = clk_get_rate(master->sysclk);
+       u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
+                                   (NSEC_PER_SEC / sysclk_rate));
+
+       /* Values greater than 3 are not valid. */
+       if (thd_delay > THD_DELAY_MAX)
+               thd_delay = THD_DELAY_MAX;
+
+       /* The CTRL_THD_DELAY field is encoded as THD_DELAY_MAX - delay. */
+       return (THD_DELAY_MAX - thd_delay);
+}
+
 static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
 {
        struct cdns_i3c_master *master = to_cdns_i3c_master(m);
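
To make the encoding above concrete: the required hold time is divided by the sysclk period (rounded up), capped at THD_DELAY_MAX, then stored inverted. A standalone sketch of the same arithmetic, with a made-up 100 MHz sysclk:

    #include <stdio.h>

    #define THD_DELAY_MAX   3
    #define NSEC_PER_SEC    1000000000UL

    static unsigned char calc_thd_delay(unsigned long thd_delay_ns,
                                        unsigned long sysclk_rate)
    {
            unsigned long cycle_ns = NSEC_PER_SEC / sysclk_rate;
            /* DIV_ROUND_UP equivalent */
            unsigned long delay = (thd_delay_ns + cycle_ns - 1) / cycle_ns;

            if (delay > THD_DELAY_MAX)
                    delay = THD_DELAY_MAX;
            /* the register field stores THD_DELAY_MAX - delay */
            return THD_DELAY_MAX - delay;
    }

    int main(void)
    {
            /* 10 ns hold, 100 MHz sysclk: 1 cycle needed, encoded as 2 */
            printf("%u\n", calc_thd_delay(10, 100000000UL));
            return 0;
    }
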
@@ -1264,6 +1286,15 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
         * We will issue ENTDAA afterwards from the threaded IRQ handler.
         */
        ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
+
+       /*
+        * Configure data hold delay based on device-specific data.
+        *
+        * MIPI I3C Specification 1.0 defines a non-zero minimal tHD_PP timing
+        * on master output. This setting allows that timing to be met on the
+        * master's SoC outputs, regardless of PCB balancing.
+        */
+       ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
        writel(ctrl, master->regs + CTRL);
 
        cdns_i3c_master_enable(master);
@@ -1521,10 +1552,18 @@ static void cdns_i3c_master_hj(struct work_struct *work)
        i3c_master_do_daa(&master->base);
 }
 
+static struct cdns_i3c_data cdns_i3c_devdata = {
+       .thd_delay_ns = 10,
+};
+
+static const struct of_device_id cdns_i3c_master_of_ids[] = {
+       { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
+       { /* sentinel */ },
+};
+
 static int cdns_i3c_master_probe(struct platform_device *pdev)
 {
        struct cdns_i3c_master *master;
-       struct resource *res;
        int ret, irq;
        u32 val;
 
@@ -1532,8 +1571,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
        if (!master)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       master->regs = devm_ioremap_resource(&pdev->dev, res);
+       master->devdata = of_device_get_match_data(&pdev->dev);
+       if (!master->devdata)
+               return -EINVAL;
+
+       master->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(master->regs))
                return PTR_ERR(master->regs);
 
@@ -1631,11 +1673,6 @@ static int cdns_i3c_master_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct of_device_id cdns_i3c_master_of_ids[] = {
-       { .compatible = "cdns,i3c-master" },
-       { /* sentinel */ },
-};
-
 static struct platform_driver cdns_i3c_master = {
        .probe = cdns_i3c_master_probe,
        .remove = cdns_i3c_master_remove,
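
The devdata wiring above follows the usual of_device_id .data pattern: the match table carries a per-compatible data pointer, and of_device_get_match_data() hands back whichever one matched. A hedged sketch with hypothetical names:

    #include <linux/of_device.h>

    struct foo_data { u8 thd_delay_ns; };

    static const struct foo_data foo_devdata = { .thd_delay_ns = 10 };

    static const struct of_device_id foo_of_ids[] = {
            { .compatible = "vendor,foo", .data = &foo_devdata },
            { /* sentinel */ },
    };

    /* in probe: */
    const struct foo_data *devdata = of_device_get_match_data(&pdev->dev);
    if (!devdata)
            return -EINVAL;   /* no match data: refuse to probe */
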
index a1a0352..b273e42 100644 (file)
@@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
        }
 }
 
-static void
-isert_wait4cmds(struct iscsi_conn *conn)
-{
-       isert_info("iscsi_conn %p\n", conn);
-
-       if (conn->sess) {
-               target_sess_cmd_list_set_waiting(conn->sess->se_sess);
-               target_wait_for_sess_cmds(conn->sess->se_sess);
-       }
-}
-
 /**
  * isert_put_unsol_pending_cmds() - Drop commands waiting for
  *     unsolicited dataout
@@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
 
        ib_drain_qp(isert_conn->qp);
        isert_put_unsol_pending_cmds(conn);
-       isert_wait4cmds(conn);
        isert_wait4logout(isert_conn);
 
        queue_work(isert_release_wq, &isert_conn->release_work);
index f918fca..cb6e3a5 100644 (file)
@@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file)
        struct evdev_client *client;
        int error;
 
-       client = kzalloc(struct_size(client, buffer, bufsize),
-                        GFP_KERNEL | __GFP_NOWARN);
-       if (!client)
-               client = vzalloc(struct_size(client, buffer, bufsize));
+       client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
        if (!client)
                return -ENOMEM;
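
The replacement above collapses the open-coded "kzalloc with __GFP_NOWARN, then vzalloc" fallback into kvzalloc(), which performs the same kmalloc-then-vmalloc fallback internally and pairs with kvfree(). A minimal sketch of the idiom:

    #include <linux/mm.h>

    /* zeroed allocation: kmalloc first, vmalloc fallback, no open-coding */
    client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
    if (!client)
            return -ENOMEM;
    /* ... */
    kvfree(client);   /* frees either allocation kind */
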
 
index 83368f1..4650f4a 100644 (file)
@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev)
        int retval = 0;
 
        retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-                                0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
+                                0x11, 0x40, 0x5601, 0x0, NULL, 0,
+                                USB_CTRL_SET_TIMEOUT);
        if (retval) {
                dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
                        __func__, retval);
@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev)
        }
 
        retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-                                0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
+                                0x44, 0x40, 0x0, 0x0, NULL, 0,
+                                USB_CTRL_SET_TIMEOUT);
        if (retval) {
                dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
                        __func__, retval);
@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev)
        }
 
        retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-                                0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
+                                0x22, 0x40, 0x0, 0x0, NULL, 0,
+                                USB_CTRL_SET_TIMEOUT);
        if (retval) {
                dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
                        __func__, retval);
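
The timeout fixes above matter because a final argument of 0 makes usb_control_msg() block indefinitely if the device never answers; USB_CTRL_SET_TIMEOUT (5 seconds) bounds the wait. Sketch of the call shape, with placeholder request values rather than the keyspan ones:

    retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                             0x01,                    /* placeholder bRequest */
                             USB_TYPE_VENDOR | USB_DIR_OUT,
                             0x0000, 0x0000, NULL, 0,
                             USB_CTRL_SET_TIMEOUT);   /* bounded wait */
    if (retval)
            dev_dbg(&dev->dev, "request failed: %d\n", retval);
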
index 4d875f2..ee55f22 100644 (file)
@@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev)
        return input_register_device(onkey->input);
 }
 
+static const struct of_device_id max77650_onkey_of_match[] = {
+       { .compatible = "maxim,max77650-onkey" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max77650_onkey_of_match);
+
 static struct platform_driver max77650_onkey_driver = {
        .driver = {
                .name = "max77650-onkey",
+               .of_match_table = max77650_onkey_of_match,
        },
        .probe = max77650_onkey_probe,
 };
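
Adding an of_match_table plus MODULE_DEVICE_TABLE(of, ...) as above lets the driver core bind the platform driver against the DT compatible and lets userspace autoload the module from that string; previously the driver could only match by name. The generic shape, with hypothetical names:

    static const struct of_device_id foo_of_match[] = {
            { .compatible = "vendor,foo" },
            { }
    };
    MODULE_DEVICE_TABLE(of, foo_of_match);   /* exports the module alias */
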
index ecd762f..53ad25e 100644 (file)
@@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
 
        if (regs->enable_mask)
                rc = regmap_update_bits(vib->regmap, regs->enable_addr,
-                                       on ? regs->enable_mask : 0, val);
+                                       regs->enable_mask, on ? ~0 : 0);
 
        return rc;
 }
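
The pm8xxx_vib fix above swaps the arguments into the semantics regmap_update_bits() actually implements: the mask selects which bits change and val supplies their new state, i.e. new = (old & ~mask) | (val & mask). A tiny standalone emulation:

    #include <stdio.h>

    static unsigned int update_bits(unsigned int old, unsigned int mask,
                                    unsigned int val)
    {
            return (old & ~mask) | (val & mask);
    }

    int main(void)
    {
            unsigned int reg = 0x00, mask = 0x80;   /* example enable bit */

            reg = update_bits(reg, mask, ~0u);      /* on:  sets bit 7 */
            printf("on:  0x%02x\n", reg);
            reg = update_bits(reg, mask, 0);        /* off: clears bit 7 */
            printf("off: 0x%02x\n", reg);
            return 0;
    }
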
index 0bc01cf..6b23e67 100644 (file)
 #define F54_NUM_TX_OFFSET       1
 #define F54_NUM_RX_OFFSET       0
 
+/*
+ * The SMBus protocol can read at most 32 bytes at a time,
+ * but reading in 32-byte chunks is fine for I2C/SPI as well.
+ */
+#define F54_REPORT_DATA_SIZE   32
+
 /* F54 commands */
 #define F54_GET_REPORT          1
 #define F54_FORCE_CAL           2
@@ -526,6 +532,7 @@ static void rmi_f54_work(struct work_struct *work)
        int report_size;
        u8 command;
        int error;
+       int i;
 
        report_size = rmi_f54_get_report_size(f54);
        if (report_size == 0) {
@@ -558,23 +565,27 @@ static void rmi_f54_work(struct work_struct *work)
 
        rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
 
-       fifo[0] = 0;
-       fifo[1] = 0;
-       error = rmi_write_block(fn->rmi_dev,
-                               fn->fd.data_base_addr + F54_FIFO_OFFSET,
-                               fifo, sizeof(fifo));
-       if (error) {
-               dev_err(&fn->dev, "Failed to set fifo start offset\n");
-               goto abort;
-       }
+       for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) {
+               int size = min(F54_REPORT_DATA_SIZE, report_size - i);
+
+               fifo[0] = i & 0xff;
+               fifo[1] = i >> 8;
+               error = rmi_write_block(fn->rmi_dev,
+                                       fn->fd.data_base_addr + F54_FIFO_OFFSET,
+                                       fifo, sizeof(fifo));
+               if (error) {
+                       dev_err(&fn->dev, "Failed to set fifo start offset\n");
+                       goto abort;
+               }
 
-       error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
-                              F54_REPORT_DATA_OFFSET, f54->report_data,
-                              report_size);
-       if (error) {
-               dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
-                       __func__, report_size, error);
-               goto abort;
+               error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+                                      F54_REPORT_DATA_OFFSET,
+                                      f54->report_data + i, size);
+               if (error) {
+                       dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+                               __func__, size, error);
+                       goto abort;
+               }
        }
 
 abort:
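
The loop above works because the F54 FIFO offset register is rewritten (two bytes, little-endian) before each 32-byte block, so the full report is assembled chunk by chunk even over SMBus. A standalone sketch of the chunking logic with a stubbed block read:

    #include <stdio.h>
    #include <string.h>

    #define CHUNK 32

    /* stand-in for rmi_read_block(): fills buf with len bytes at off */
    static int read_block(unsigned int off, unsigned char *buf,
                          unsigned int len)
    {
            memset(buf, (unsigned char)off, len);
            return 0;
    }

    static int read_report(unsigned char *out, unsigned int report_size)
    {
            unsigned int i;

            for (i = 0; i < report_size; i += CHUNK) {
                    unsigned int size = report_size - i < CHUNK
                                        ? report_size - i : CHUNK;
                    unsigned char fifo[2] = { i & 0xff, i >> 8 };  /* LE */

                    /* the real driver writes fifo[] to the offset register */
                    (void)fifo;
                    if (read_block(i, out + i, size))
                            return -1;
            }
            return 0;
    }

    int main(void)
    {
            unsigned char buf[100];   /* read as 32 + 32 + 32 + 4 */
            return read_report(buf, sizeof(buf));
    }
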
index b313c57..2407ea4 100644 (file)
@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
                /* prepare to write next block of bytes */
                cur_len -= SMB_MAX_COUNT;
                databuff += SMB_MAX_COUNT;
+               rmiaddr += SMB_MAX_COUNT;
        }
 exit:
        mutex_unlock(&rmi_smb->page_mutex);
@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
                /* prepare to read next block of bytes */
                cur_len -= SMB_MAX_COUNT;
                databuff += SMB_MAX_COUNT;
+               rmiaddr += SMB_MAX_COUNT;
        }
 
        retval = 0;
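
The two one-liners above fix a classic block-transfer bug: the buffer pointer and remaining length advanced between 32-byte blocks, but the register address did not, so every block after the first re-read or re-wrote the first 32 bytes. The corrected loop shape, with a hypothetical transfer helper:

    #include <stddef.h>

    #define SMB_MAX_COUNT 32

    /* hypothetical stand-in for the SMBus block transfer */
    static void smb_block_xfer(unsigned short addr, unsigned char *buf,
                               size_t n)
    {
            (void)addr; (void)buf; (void)n;
    }

    static void xfer_all(unsigned short rmiaddr, unsigned char *databuff,
                         size_t cur_len)
    {
            while (cur_len > 0) {
                    size_t block = cur_len < SMB_MAX_COUNT
                                   ? cur_len : SMB_MAX_COUNT;

                    smb_block_xfer(rmiaddr, databuff, block);
                    cur_len  -= block;
                    databuff += block;
                    rmiaddr  += block;   /* the advance the fix adds */
            }
    }

    int main(void)
    {
            unsigned char buf[100] = { 0 };

            xfer_all(0x0100, buf, sizeof(buf));
            return 0;
    }
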
index 2ca586f..e08b0ef 100644 (file)
@@ -1713,7 +1713,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
        aiptek->inputdev = inputdev;
        aiptek->intf = intf;
-       aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber;
+       aiptek->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
        aiptek->inDelay = 0;
        aiptek->endDelay = 0;
        aiptek->previousJitterable = 0;
@@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
        input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
 
        /* Verify that a device really has an endpoint */
-       if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
                dev_err(&intf->dev,
                        "interface has %d endpoints, but must have minimum 1\n",
-                       intf->altsetting[0].desc.bNumEndpoints);
+                       intf->cur_altsetting->desc.bNumEndpoints);
                err = -EINVAL;
                goto fail3;
        }
-       endpoint = &intf->altsetting[0].endpoint[0].desc;
+       endpoint = &intf->cur_altsetting->endpoint[0].desc;
 
        /* Go set up our URB, which is called when the tablet receives
         * input.
index 3503122..96d6557 100644 (file)
@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
        }
 
        /* Sanity check that a device has an endpoint */
-       if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+       if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
                dev_err(&usbinterface->dev,
                        "Invalid number of endpoints\n");
                error = -EINVAL;
                goto err_free_urb;
        }
 
-       /*
-        * The endpoint is always altsetting 0, we know this since we know
-        * this device only has one interrupt endpoint
-        */
-       endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+       endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
 
        /* Some debug */
        dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
@@ -896,7 +892,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
        if (usb_endpoint_xfer_int(endpoint))
                dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n");
 
-       dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen);
+       dev_dbg(&usbinterface->dev, "interface extra len:%d\n",
+               usbinterface->cur_altsetting->extralen);
 
        /*
         * Find the HID descriptor so we can find out the size of the
@@ -973,8 +970,6 @@ static int gtco_probe(struct usb_interface *usbinterface,
        input_dev->dev.parent = &usbinterface->dev;
 
        /* Setup the URB, it will be posted later on open of input device */
-       endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
-
        usb_fill_int_urb(gtco->urbinfo,
                         udev,
                         usb_rcvintpipe(udev,
index a1f3a0c..38f0874 100644 (file)
@@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf,
                return -ENODEV;
 
        /* Sanity check that the device has an endpoint */
-       if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
                dev_err(&intf->dev, "Invalid number of endpoints\n");
                return -EINVAL;
        }
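
These endpoint-count checks (and the aiptek/gtco ones above) now consult intf->cur_altsetting, the setting the interface is actually using, rather than altsetting[0], which is merely the first entry of the descriptor array and may differ once another setting is selected. The resulting check shape:

    /* validate the interface as currently configured, not array entry 0 */
    if (intf->cur_altsetting->desc.bNumEndpoints < 1)
            return -EINVAL;
    endpoint = &intf->cur_altsetting->endpoint[0].desc;
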
index 0af0fe8..742a7e9 100644 (file)
@@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct device *hwmon;
+       struct thermal_zone_device *thermal;
        int error;
        u32 reg;
        bool ts_attached;
@@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
        if (IS_ERR(hwmon))
                return PTR_ERR(hwmon);
 
-       devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
+       thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
+                                                      &sun4i_ts_tz_ops);
+       if (IS_ERR(thermal))
+               return PTR_ERR(thermal);
 
        writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
 
index 1dd47dd..34d31c7 100644 (file)
@@ -661,7 +661,7 @@ static int sur40_probe(struct usb_interface *interface,
        int error;
 
        /* Check if we really have the right interface. */
-       iface_desc = &interface->altsetting[0];
+       iface_desc = interface->cur_altsetting;
        if (iface_desc->desc.bInterfaceClass != 0xFF)
                return -ENODEV;
 
index 568c523..483f7bc 100644 (file)
@@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 {
        struct pci_dev *pdev = iommu->dev;
-       u64 val = 0xabcd, val2 = 0;
+       u64 val = 0xabcd, val2 = 0, save_reg = 0;
 
        if (!iommu_feature(iommu, FEATURE_PC))
                return;
 
        amd_iommu_pc_present = true;
 
+       /* save the value to restore, if writable */
+       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+               goto pc_false;
+
        /* Check if the performance counters can be written to */
        if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
            (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
-           (val != val2)) {
-               pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
-               amd_iommu_pc_present = false;
-               return;
-       }
+           (val != val2))
+               goto pc_false;
+
+       /* restore */
+       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+               goto pc_false;
 
        pci_info(pdev, "IOMMU performance counters supported\n");
 
        val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
        iommu->max_banks = (u8) ((val >> 12) & 0x3f);
        iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+       return;
+
+pc_false:
+       pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+       amd_iommu_pc_present = false;
+       return;
 }
 
 static ssize_t amd_iommu_show_cap(struct device *dev,
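
The rework above turns counter detection into a save/test/restore sequence, so probing for writability no longer clobbers whatever the register held. A generic standalone sketch with a simulated register:

    #include <stdio.h>

    static unsigned long long reg = 0x1234;   /* simulated counter register */

    static int reg_rw(unsigned long long *val, int is_write)
    {
            if (is_write)
                    reg = *val;
            else
                    *val = reg;
            return 0;
    }

    static int counter_is_writable(void)
    {
            unsigned long long save, val = 0xabcd, val2 = 0;

            if (reg_rw(&save, 0))                   /* save original */
                    return 0;
            if (reg_rw(&val, 1) || reg_rw(&val2, 0) || val != val2)
                    return 0;                       /* write/read-back test */
            if (reg_rw(&save, 1))                   /* restore original */
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("writable: %d\n", counter_is_writable());
            return 0;
    }
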
index 1801f0a..932267f 100644 (file)
@@ -5163,7 +5163,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
 
        spin_lock_irqsave(&device_domain_lock, flags);
        info = dev->archdata.iommu;
-       if (info)
+       if (info && info != DEFER_DEVICE_DOMAIN_INFO
+           && info != DUMMY_DEVICE_DOMAIN_INFO)
                __dmar_remove_one_dev_info(info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
index b7e0ae1..e8922fa 100644 (file)
@@ -493,16 +493,17 @@ static int as3645a_parse_node(struct as3645a *flash,
                switch (id) {
                case AS_LED_FLASH:
                        flash->flash_node = child;
+                       fwnode_handle_get(child);
                        break;
                case AS_LED_INDICATOR:
                        flash->indicator_node = child;
+                       fwnode_handle_get(child);
                        break;
                default:
                        dev_warn(&flash->client->dev,
                                 "unknown LED %u encountered, ignoring\n", id);
                        break;
                }
-               fwnode_handle_get(child);
        }
 
        if (!flash->flash_node) {
index a5c73f3..2bf7459 100644 (file)
@@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
                struct gpio_led led = {};
                const char *state = NULL;
 
+               /*
+                * Acquire the gpiod from DT with an uninitialized label, which
+                * will be updated after the LED class device is registered;
+                * only then is the final LED name known.
+                */
                led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
                                                             GPIOD_ASIS,
-                                                            led.name);
+                                                            NULL);
                if (IS_ERR(led.gpiod)) {
                        fwnode_handle_put(child);
                        return ERR_CAST(led.gpiod);
@@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
                        fwnode_handle_put(child);
                        return ERR_PTR(ret);
                }
+               /* Set gpiod label to match the corresponding LED name. */
+               gpiod_set_consumer_name(led_dat->gpiod,
+                                       led_dat->cdev.dev->kobj.name);
                priv->num_leds++;
        }
 
index 0507c65..491268b 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 // TI LM3532 LED driver
 // Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// http://www.ti.com/lit/ds/symlink/lm3532.pdf
 
 #include <linux/i2c.h>
 #include <linux/leds.h>
@@ -623,7 +624,7 @@ static int lm3532_parse_node(struct lm3532_data *priv)
 
                led->num_leds = fwnode_property_count_u32(child, "led-sources");
                if (led->num_leds > LM3532_MAX_LED_STRINGS) {
-                       dev_err(&priv->client->dev, "To many LED string defined\n");
+                       dev_err(&priv->client->dev, "Too many LED strings defined\n");
                        continue;
                }
 
index 4c2d0b3..a0d4b72 100644 (file)
@@ -135,9 +135,16 @@ err_node_put:
        return rv;
 }
 
+static const struct of_device_id max77650_led_of_match[] = {
+       { .compatible = "maxim,max77650-led" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max77650_led_of_match);
+
 static struct platform_driver max77650_led_driver = {
        .driver = {
                .name = "max77650-led",
+               .of_match_table = max77650_led_of_match,
        },
        .probe = max77650_led_probe,
 };
index db5af83..b6447c1 100644 (file)
@@ -21,7 +21,6 @@ static void rb532_led_set(struct led_classdev *cdev,
 {
        if (brightness)
                set_latch_u5(LO_ULED, 0);
-
        else
                set_latch_u5(0, LO_ULED);
 }
index 718729c..3abcafe 100644 (file)
@@ -455,7 +455,7 @@ static void __exit pattern_trig_exit(void)
 module_init(pattern_trig_init);
 module_exit(pattern_trig_exit);
 
-MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com");
-MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org");
+MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com>");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org>");
 MODULE_DESCRIPTION("LED Pattern trigger");
 MODULE_LICENSE("GPL v2");
index 95b41c0..663d879 100644 (file)
@@ -1107,7 +1107,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
                                         card->erase_arg == MMC_TRIM_ARG ?
                                         INAND_CMD38_ARG_TRIM :
                                         INAND_CMD38_ARG_ERASE,
-                                        0);
+                                        card->ext_csd.generic_cmd6_time);
                }
                if (!err)
                        err = mmc_erase(card, from, nr, card->erase_arg);
@@ -1149,7 +1149,7 @@ retry:
                                 arg == MMC_SECURE_TRIM1_ARG ?
                                 INAND_CMD38_ARG_SECTRIM1 :
                                 INAND_CMD38_ARG_SECERASE,
-                                0);
+                                card->ext_csd.generic_cmd6_time);
                if (err)
                        goto out_retry;
        }
@@ -1167,7 +1167,7 @@ retry:
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         INAND_CMD38_ARG_SECTRIM2,
-                                        0);
+                                        card->ext_csd.generic_cmd6_time);
                        if (err)
                                goto out_retry;
                }
index abf8f5e..aa54d35 100644 (file)
@@ -2330,7 +2330,13 @@ void mmc_rescan(struct work_struct *work)
        }
 
        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-               if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+               unsigned int freq = freqs[i];
+               if (freq > host->f_max) {
+                       if (i + 1 < ARRAY_SIZE(freqs))
+                               continue;
+                       freq = host->f_max;
+               }
+               if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
                        break;
                if (freqs[i] <= host->f_min)
                        break;
@@ -2344,7 +2350,7 @@ void mmc_rescan(struct work_struct *work)
 
 void mmc_start_host(struct mmc_host *host)
 {
-       host->f_init = max(freqs[0], host->f_min);
+       host->f_init = max(min(freqs[0], host->f_max), host->f_min);
        host->rescan_disable = 0;
        host->ios.power_mode = MMC_POWER_UNDEFINED;
 
index 105b7a7..c876872 100644 (file)
@@ -175,8 +175,6 @@ int mmc_of_parse(struct mmc_host *host)
        struct device *dev = host->parent;
        u32 bus_width, drv_type, cd_debounce_delay_ms;
        int ret;
-       bool cd_cap_invert, cd_gpio_invert = false;
-       bool ro_cap_invert, ro_gpio_invert = false;
 
        if (!dev || !dev_fwnode(dev))
                return 0;
@@ -219,10 +217,12 @@ int mmc_of_parse(struct mmc_host *host)
         */
 
        /* Parse Card Detection */
+
        if (device_property_read_bool(dev, "non-removable")) {
                host->caps |= MMC_CAP_NONREMOVABLE;
        } else {
-               cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+               if (device_property_read_bool(dev, "cd-inverted"))
+                       host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
 
                if (device_property_read_u32(dev, "cd-debounce-delay-ms",
                                             &cd_debounce_delay_ms))
@@ -232,32 +232,19 @@ int mmc_of_parse(struct mmc_host *host)
                        host->caps |= MMC_CAP_NEEDS_POLL;
 
                ret = mmc_gpiod_request_cd(host, "cd", 0, false,
-                                          cd_debounce_delay_ms * 1000,
-                                          &cd_gpio_invert);
+                                          cd_debounce_delay_ms * 1000);
                if (!ret)
                        dev_info(host->parent, "Got CD GPIO\n");
                else if (ret != -ENOENT && ret != -ENOSYS)
                        return ret;
-
-               /*
-                * There are two ways to flag that the CD line is inverted:
-                * through the cd-inverted flag and by the GPIO line itself
-                * being inverted from the GPIO subsystem. This is a leftover
-                * from the times when the GPIO subsystem did not make it
-                * possible to flag a line as inverted.
-                *
-                * If the capability on the host AND the GPIO line are
-                * both inverted, the end result is that the CD line is
-                * not inverted.
-                */
-               if (cd_cap_invert ^ cd_gpio_invert)
-                       host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
        }
 
        /* Parse Write Protection */
-       ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
 
-       ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
+       if (device_property_read_bool(dev, "wp-inverted"))
+               host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+       ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
        if (!ret)
                dev_info(host->parent, "Got WP GPIO\n");
        else if (ret != -ENOENT && ret != -ENOSYS)
@@ -266,10 +253,6 @@ int mmc_of_parse(struct mmc_host *host)
        if (device_property_read_bool(dev, "disable-wp"))
                host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
 
-       /* See the comment on CD inversion above */
-       if (ro_cap_invert ^ ro_gpio_invert)
-               host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
        if (device_property_read_bool(dev, "cap-sd-highspeed"))
                host->caps |= MMC_CAP_SD_HIGHSPEED;
        if (device_property_read_bool(dev, "cap-mmc-highspeed"))
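The rework above removes the cap_invert/gpio_invert XOR bookkeeping:
"cd-inverted" and "wp-inverted" now only set the ACTIVE_HIGH capability
bits, and the composition with the GPIO line's own polarity happens when
the descriptor's active-low flag is toggled at request time (see the
slot-gpio hunks below). A toy model of the invariant being preserved, not
kernel code:

#include <stdio.h>
#include <stdbool.h>

static bool toggle_active_low(bool active_low) { return !active_low; }

int main(void)
{
	int line_al, cd_inv;

	for (line_al = 0; line_al <= 1; line_al++)
		for (cd_inv = 0; cd_inv <= 1; cd_inv++) {
			bool al = line_al;

			if (cd_inv)	/* "cd-inverted" set CD_ACTIVE_HIGH */
				al = toggle_active_low(al);
			printf("line-active-low=%d cd-inverted=%d -> effective-active-low=%d\n",
			       line_al, cd_inv, (int)al);
		}
	return 0;
}
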
index 09113b9..da425ee 100644 (file)
@@ -19,7 +19,9 @@
 #include "host.h"
 #include "mmc_ops.h"
 
-#define MMC_OPS_TIMEOUT_MS     (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_OPS_TIMEOUT_MS             (10 * 60 * 1000) /* 10 min */
+#define MMC_BKOPS_TIMEOUT_MS           (120 * 1000) /* 120s */
+#define MMC_CACHE_FLUSH_TIMEOUT_MS     (30 * 1000) /* 30s */
 
 static const u8 tuning_blk_pattern_4bit[] = {
        0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
        bool expired = false;
        bool busy = false;
 
-       /* We have an unspecified cmd timeout, use the fallback value. */
-       if (!timeout_ms)
-               timeout_ms = MMC_OPS_TIMEOUT_MS;
-
        /*
         * In cases when not allowed to poll by using CMD13 or because we aren't
         * capable of polling by using ->card_busy(), then rely on waiting the
@@ -534,14 +532,19 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 
        mmc_retune_hold(host);
 
+       if (!timeout_ms) {
+               pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
+                       mmc_hostname(host));
+               timeout_ms = card->ext_csd.generic_cmd6_time;
+       }
+
        /*
-        * If the cmd timeout and the max_busy_timeout of the host are both
-        * specified, let's validate them. A failure means we need to prevent
-        * the host from doing hw busy detection, which is done by converting
-        * to a R1 response instead of a R1B.
+        * If the max_busy_timeout of the host is specified, make sure it's
+        * enough to fit the used timeout_ms. In case it's not, let's instruct
+        * the host to avoid HW busy detection, by converting to a R1 response
+        * instead of a R1B.
         */
-       if (timeout_ms && host->max_busy_timeout &&
-               (timeout_ms > host->max_busy_timeout))
+       if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;
 
        cmd.opcode = MMC_SWITCH;
@@ -552,10 +555,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
        cmd.flags = MMC_CMD_AC;
        if (use_r1b_resp) {
                cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
-               /*
-                * A busy_timeout of zero means the host can decide to use
-                * whatever value it finds suitable.
-                */
                cmd.busy_timeout = timeout_ms;
        } else {
                cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
@@ -941,7 +940,7 @@ void mmc_run_bkops(struct mmc_card *card)
         * urgent levels by using an asynchronous background task, when idle.
         */
        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                       EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
+                        EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
        if (err)
                pr_warn("%s: Error %d starting bkops\n",
                        mmc_hostname(card->host), err);
@@ -961,7 +960,8 @@ int mmc_flush_cache(struct mmc_card *card)
                        (card->ext_csd.cache_size > 0) &&
                        (card->ext_csd.cache_ctrl & 1)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                               EXT_CSD_FLUSH_CACHE, 1, 0);
+                                EXT_CSD_FLUSH_CACHE, 1,
+                                MMC_CACHE_FLUSH_TIMEOUT_MS);
                if (err)
                        pr_err("%s: cache flush error %d\n",
                                        mmc_hostname(card->host), err);
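A sketch of the CMD6 timeout policy these hunks converge on; the helper and
its parameters are invented for illustration, but the decisions mirror
__mmc_switch() above:

#include <stdbool.h>

static unsigned int cmd6_timeout_ms(unsigned int requested_ms,
				    unsigned int generic_cmd6_time_ms,
				    unsigned int max_busy_timeout_ms,
				    bool *use_r1b_resp)
{
	unsigned int timeout_ms = requested_ms;

	*use_r1b_resp = true;

	/* No caller-supplied timeout: fall back to EXT_CSD's generic one. */
	if (!timeout_ms)
		timeout_ms = generic_cmd6_time_ms;

	/*
	 * If the host cannot do HW busy detection for that long, drop to an
	 * R1 response and poll with CMD13 instead.
	 */
	if (max_busy_timeout_ms && timeout_ms > max_busy_timeout_ms)
		*use_r1b_resp = false;

	return timeout_ms;
}
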
index da2596c..05e9074 100644 (file)
@@ -19,7 +19,6 @@
 struct mmc_gpio {
        struct gpio_desc *ro_gpio;
        struct gpio_desc *cd_gpio;
-       bool override_cd_active_level;
        irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
        char *ro_label;
        char *cd_label;
@@ -80,13 +79,6 @@ int mmc_gpio_get_cd(struct mmc_host *host)
                return -ENOSYS;
 
        cansleep = gpiod_cansleep(ctx->cd_gpio);
-       if (ctx->override_cd_active_level) {
-               int value = cansleep ?
-                               gpiod_get_raw_value_cansleep(ctx->cd_gpio) :
-                               gpiod_get_raw_value(ctx->cd_gpio);
-               return !value ^ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
-       }
-
        return cansleep ?
                gpiod_get_value_cansleep(ctx->cd_gpio) :
                gpiod_get_value(ctx->cd_gpio);
@@ -168,8 +160,6 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
  * @idx: index of the GPIO to obtain in the consumer
  * @override_active_level: ignore %GPIO_ACTIVE_LOW flag
  * @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not, set
- * to NULL to ignore
  *
  * Note that this must be called prior to mmc_add_host()
  * otherwise the caller must also call mmc_gpiod_request_cd_irq().
@@ -178,7 +168,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
  */
 int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
                         unsigned int idx, bool override_active_level,
-                        unsigned int debounce, bool *gpio_invert)
+                        unsigned int debounce)
 {
        struct mmc_gpio *ctx = host->slot.handler_priv;
        struct gpio_desc *desc;
@@ -194,10 +184,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
                        ctx->cd_debounce_delay_ms = debounce / 1000;
        }
 
-       if (gpio_invert)
-               *gpio_invert = !gpiod_is_active_low(desc);
+       /* override forces default (active-low) polarity ... */
+       if (override_active_level && !gpiod_is_active_low(desc))
+               gpiod_toggle_active_low(desc);
+
+       /* ... or active-high */
+       if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+               gpiod_toggle_active_low(desc);
 
-       ctx->override_cd_active_level = override_active_level;
        ctx->cd_gpio = desc;
 
        return 0;
@@ -218,14 +212,11 @@ EXPORT_SYMBOL(mmc_can_gpio_cd);
  * @con_id: function within the GPIO consumer
  * @idx: index of the GPIO to obtain in the consumer
  * @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not,
- * set to NULL to ignore
  *
  * Returns zero on success, else an error.
  */
 int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
-                        unsigned int idx,
-                        unsigned int debounce, bool *gpio_invert)
+                        unsigned int idx, unsigned int debounce)
 {
        struct mmc_gpio *ctx = host->slot.handler_priv;
        struct gpio_desc *desc;
@@ -241,8 +232,8 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
                        return ret;
        }
 
-       if (gpio_invert)
-               *gpio_invert = !gpiod_is_active_low(desc);
+       if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
+               gpiod_toggle_active_low(desc);
 
        ctx->ro_gpio = desc;
 
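gpiod_toggle_active_low() lets slot-gpio normalize polarity once, when the
descriptor is requested, so every later read can use the logical
gpiod_get_value*() accessors with no per-read special casing. A hedged
sketch of the request-side pattern (names and error handling simplified):

#include <linux/gpio/consumer.h>
#include <linux/mmc/host.h>

static int example_request_ro(struct mmc_host *host, struct gpio_desc **out)
{
	struct gpio_desc *desc;

	desc = devm_gpiod_get_index(&host->class_dev, "wp", 0, GPIOD_IN);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Fold the MMC polarity cap into the descriptor itself. */
	if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
		gpiod_toggle_active_low(desc);

	/* From here on, gpiod_get_value(desc) accounts for polarity. */
	*out = desc;
	return 0;
}
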
index d06b2df..3a5089f 100644 (file)
@@ -501,6 +501,7 @@ config MMC_SDHCI_MSM
        depends on ARCH_QCOM || (ARM && COMPILE_TEST)
        depends on MMC_SDHCI_PLTFM
        select MMC_SDHCI_IO_ACCESSORS
+       select MMC_CQHCI
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
          support present in Qualcomm SOCs. The controller supports
@@ -990,6 +991,7 @@ config MMC_SDHCI_BRCMSTB
        tristate "Broadcom SDIO/SD/MMC support"
        depends on ARCH_BRCMSTB || BMIPS_GENERIC
        depends on MMC_SDHCI_PLTFM
+       select MMC_CQHCI
        default y
        help
          This selects support for the SDIO/SD/MMC Host Controller on
@@ -1010,6 +1012,7 @@ config MMC_SDHCI_OMAP
        depends on MMC_SDHCI_PLTFM && OF
        select THERMAL
        imply TI_SOC_THERMAL
+       select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
          support present in TI's DRA7 SOCs. The controller supports
@@ -1040,3 +1043,6 @@ config MMC_OWL
        help
          This selects support for the SD/MMC Host Controller on
          Actions Semi Owl SoCs.
+
+config MMC_SDHCI_EXTERNAL_DMA
+       bool
index 6f065bb..aeaaa53 100644 (file)
@@ -2645,7 +2645,7 @@ static int atmci_runtime_resume(struct device *dev)
 {
        struct atmel_mci *host = dev_get_drvdata(dev);
 
-       pinctrl_pm_select_default_state(dev);
+       pinctrl_select_default_state(dev);
 
        return clk_prepare_enable(host->mck);
 }
index bc8aeb4..8823680 100644 (file)
@@ -984,12 +984,9 @@ static int au1xmmc_probe(struct platform_device *pdev)
                goto out2;
        }
 
-       r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no IRQ defined\n");
+       host->irq = platform_get_irq(pdev, 0);
+       if (host->irq < 0)
                goto out3;
-       }
-       host->irq = r->start;
 
        mmc->ops = &au1xmmc_ops;
 
index 99f61fd..c3d9498 100644 (file)
@@ -1393,7 +1393,17 @@ static int bcm2835_probe(struct platform_device *pdev)
        host->dma_chan = NULL;
        host->dma_desc = NULL;
 
-       host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx");
+       host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx");
+       if (IS_ERR(host->dma_chan_rxtx)) {
+               ret = PTR_ERR(host->dma_chan_rxtx);
+               host->dma_chan_rxtx = NULL;
+
+               if (ret == -EPROBE_DEFER)
+                       goto err;
+
+               /* Ignore errors to fall back to PIO mode */
+       }
+
 
        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk)) {
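The bcm2835 hunk above is one instance of a conversion repeated through this
pull (dw_mmc, mxcmci, mxs-mmc, owl-mmc, pxamci, mmci):
dma_request_slave_channel() hid every failure behind a NULL return, whereas
dma_request_chan() returns an ERR_PTR, so probe deferral can propagate while
any other error still falls back to PIO. A condensed sketch of the pattern:

#include <linux/dmaengine.h>

static int example_request_dma(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "rx-tx");
	if (IS_ERR(*chan)) {
		int ret = PTR_ERR(*chan);

		*chan = NULL;
		if (ret == -EPROBE_DEFER)
			return ret;	/* provider not ready: retry probe */
		/* Any other error: leave *chan NULL and use PIO. */
	}
	return 0;
}
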
index eee08d8..76013bb 100644 (file)
@@ -76,8 +76,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
                return ret;
 
        host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
-       if (!host->base)
-               return -EINVAL;
+       if (!host->base) {
+               ret = -EINVAL;
+               goto error;
+       }
 
        /* On ThunderX these are identical */
        host->dma_base = host->base;
@@ -86,12 +88,14 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
        host->reg_off_dma = 0x160;
 
        host->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(host->clk))
-               return PTR_ERR(host->clk);
+       if (IS_ERR(host->clk)) {
+               ret = PTR_ERR(host->clk);
+               goto error;
+       }
 
        ret = clk_prepare_enable(host->clk);
        if (ret)
-               return ret;
+               goto error;
        host->sys_freq = clk_get_rate(host->clk);
 
        spin_lock_init(&host->irq_handler_lock);
@@ -157,6 +161,7 @@ error:
                }
        }
        clk_disable_unprepare(host->clk);
+       pci_release_regions(pdev);
        return ret;
 }
 
@@ -175,6 +180,7 @@ static void thunder_mmc_remove(struct pci_dev *pdev)
        writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
 
        clk_disable_unprepare(host->clk);
+       pci_release_regions(pdev);
 }
 
 static const struct pci_device_id thunder_mmc_id_table[] = {
index ebfaeb3..f01fecd 100644 (file)
@@ -1174,13 +1174,13 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
                mmc->caps |= pdata->caps;
 
        /* Register a cd gpio, if there is not one, enable polling */
-       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
        if (ret == -EPROBE_DEFER)
                return ret;
        else if (ret)
                mmc->caps |= MMC_CAP_NEEDS_POLL;
 
-       ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+       ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
        if (ret == -EPROBE_DEFER)
                return ret;
 
index fc9d4d0..bc5278a 100644 (file)
@@ -833,12 +833,14 @@ static int dw_mci_edmac_init(struct dw_mci *host)
        if (!host->dms)
                return -ENOMEM;
 
-       host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
-       if (!host->dms->ch) {
+       host->dms->ch = dma_request_chan(host->dev, "rx-tx");
+       if (IS_ERR(host->dms->ch)) {
+               int ret = PTR_ERR(host->dms->ch);
+
                dev_err(host->dev, "Failed to get external DMA channel.\n");
                kfree(host->dms);
                host->dms = NULL;
-               return -ENXIO;
+               return ret;
        }
 
        return 0;
index 78383f6..fbae87d 100644 (file)
@@ -1108,7 +1108,7 @@ static int jz4740_mmc_suspend(struct device *dev)
 
 static int jz4740_mmc_resume(struct device *dev)
 {
-       return pinctrl_pm_select_default_state(dev);
+       return pinctrl_select_default_state(dev);
 }
 
 static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
index e712315..35400cf 100644 (file)
@@ -161,7 +161,6 @@ struct meson_host {
        bool dram_access_quirk;
 
        struct pinctrl *pinctrl;
-       struct pinctrl_state *pins_default;
        struct pinctrl_state *pins_clk_gate;
 
        unsigned int bounce_buf_size;
@@ -327,7 +326,7 @@ static void meson_mmc_clk_ungate(struct meson_host *host)
        u32 cfg;
 
        if (host->pins_clk_gate)
-               pinctrl_select_state(host->pinctrl, host->pins_default);
+               pinctrl_select_default_state(host->dev);
 
        /* Make sure the clock is not stopped in the controller */
        cfg = readl(host->regs + SD_EMMC_CFG);
@@ -1101,13 +1100,6 @@ static int meson_mmc_probe(struct platform_device *pdev)
                goto free_host;
        }
 
-       host->pins_default = pinctrl_lookup_state(host->pinctrl,
-                                                 PINCTRL_STATE_DEFAULT);
-       if (IS_ERR(host->pins_default)) {
-               ret = PTR_ERR(host->pins_default);
-               goto free_host;
-       }
-
        host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
                                                   "clk-gate");
        if (IS_ERR(host->pins_clk_gate)) {
index ba9a63d..8b038e7 100644 (file)
@@ -638,7 +638,6 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
        struct platform_device *slot_pdev;
        struct mmc_host *mmc;
        struct meson_mx_mmc_host *host;
-       struct resource *res;
        int ret, irq;
        u32 conf;
 
@@ -663,8 +662,7 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, host);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->base = devm_ioremap_resource(host->controller_dev, res);
+       host->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->base)) {
                ret = PTR_ERR(host->base);
                goto error_free_mmc;
index 74c6cfb..951f76d 100644 (file)
@@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host)
         * SPI protocol.  Another is that when chipselect is released while
         * the card returns BUSY status, the clock must issue several cycles
         * with chipselect high before the card will stop driving its output.
+        *
+        * SPI_CS_HIGH means "asserted" here. In some cases like when using
+        * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
+        * inverted by gpiolib, so if we want to be sure it is driven high
+        * we should toggle the default with an XOR as we do here.
         */
-       host->spi->mode |= SPI_CS_HIGH;
+       host->spi->mode ^= SPI_CS_HIGH;
        if (spi_setup(host->spi) != 0) {
                /* Just warn; most cards work without it. */
                dev_warn(&host->spi->dev,
                                "can't change chip-select polarity\n");
-               host->spi->mode &= ~SPI_CS_HIGH;
+               host->spi->mode ^= SPI_CS_HIGH;
        } else {
                mmc_spi_readbytes(host, 18);
 
-               host->spi->mode &= ~SPI_CS_HIGH;
+               host->spi->mode ^= SPI_CS_HIGH;
                if (spi_setup(host->spi) != 0) {
                        /* Wot, we can't get the same setup we had before? */
                        dev_err(&host->spi->dev,
@@ -1421,7 +1426,7 @@ static int mmc_spi_probe(struct spi_device *spi)
         * Index 0 is card detect
         * Old boardfiles were specifying 1 ms as debounce
         */
-       status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
+       status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
        if (status == -EPROBE_DEFER)
                goto fail_add_host;
        if (!status) {
@@ -1436,7 +1441,7 @@ static int mmc_spi_probe(struct spi_device *spi)
        mmc_detect_change(mmc, 0);
 
        /* Index 1 is write protect/read only */
-       status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
+       status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
        if (status == -EPROBE_DEFER)
                goto fail_add_host;
        if (!status)
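The XOR dance above deserves a standalone model: with a GPIO chip select,
gpiolib may already report the logical polarity inverted, so mode can start
with SPI_CS_HIGH set; forcing the bit on and then off would wedge such a
setup, while toggling twice always restores the original default. The
SPI_CS_HIGH value below is taken from <linux/spi/spi.h>:

#include <stdio.h>

#define SPI_CS_HIGH 0x04

int main(void)
{
	unsigned int start;

	/* Either starting polarity: GPIO CS may begin with the bit set. */
	for (start = 0; start <= SPI_CS_HIGH; start += SPI_CS_HIGH) {
		unsigned int mode = start;

		mode ^= SPI_CS_HIGH;	/* drive CS to the opposite level */
		mode ^= SPI_CS_HIGH;	/* restore the original default */
		printf("start=%#x restored=%#x\n", start, mode);
	}
	return 0;
}
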
index 40e72c3..e9ffce8 100644 (file)
@@ -169,6 +169,8 @@ static struct variant_data variant_ux500 = {
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
+       .datactrl_any_blocksz   = true,
+       .dma_power_of_2         = true,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
@@ -202,6 +204,8 @@ static struct variant_data variant_ux500v2 = {
        .datactrl_mask_ddrmode  = MCI_DPSM_ST_DDRMODE,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
+       .datactrl_any_blocksz   = true,
+       .dma_power_of_2         = true,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
@@ -261,6 +265,7 @@ static struct variant_data variant_stm32_sdmmc = {
        .datacnt_useless        = true,
        .datalength_bits        = 25,
        .datactrl_blocksz       = 14,
+       .datactrl_any_blocksz   = true,
        .stm32_idmabsize_mask   = GENMASK(12, 5),
        .busy_timeout           = true,
        .busy_detect            = true,
@@ -284,6 +289,7 @@ static struct variant_data variant_qcom = {
        .data_cmd_enable        = MCI_CPSM_QCOM_DATCMD,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
+       .datactrl_any_blocksz   = true,
        .pwrreg_powerup         = MCI_PWR_UP,
        .f_max                  = 208000000,
        .explicit_mclk_control  = true,
@@ -452,10 +458,11 @@ static void mmci_dma_setup(struct mmci_host *host)
 static int mmci_validate_data(struct mmci_host *host,
                              struct mmc_data *data)
 {
+       struct variant_data *variant = host->variant;
+
        if (!data)
                return 0;
-
-       if (!is_power_of_2(data->blksz)) {
+       if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
                dev_err(mmc_dev(host->mmc),
                        "unsupported block size (%d bytes)\n", data->blksz);
                return -EINVAL;
@@ -520,7 +527,9 @@ static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
                 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
                 data->sg_len, data->blksz, data->blocks, data->flags);
 
-       host->ops->dma_start(host, &datactrl);
+       ret = host->ops->dma_start(host, &datactrl);
+       if (ret)
+               return ret;
 
        /* Trigger the DMA transfer */
        mmci_write_datactrlreg(host, datactrl);
@@ -706,10 +715,20 @@ int mmci_dmae_setup(struct mmci_host *host)
 
        host->dma_priv = dmae;
 
-       dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
-                                                    "rx");
-       dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
-                                                    "tx");
+       dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
+       if (IS_ERR(dmae->rx_channel)) {
+               int ret = PTR_ERR(dmae->rx_channel);
+               dmae->rx_channel = NULL;
+               return ret;
+       }
+
+       dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
+       if (IS_ERR(dmae->tx_channel)) {
+               if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
+                       dev_warn(mmc_dev(host->mmc),
+                                "Deferred probe for TX channel ignored\n");
+               dmae->tx_channel = NULL;
+       }
 
        /*
         * If only an RX channel is specified, the driver will
@@ -888,6 +907,18 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
        if (data->blksz * data->blocks <= variant->fifosize)
                return -EINVAL;
 
+       /*
+        * This is necessary to get SDIO working on the Ux500. We do not yet
+        * know if this is a bug in:
+        * - The Ux500 DMA controller (DMA40)
+        * - The MMCI DMA interface on the Ux500
+        * Some power-of-two block sizes (such as 64 bytes) are sent regularly
+        * during SDIO traffic and those work fine, so for these we enable DMA
+        * transfers.
+        */
+       if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
+               return -EINVAL;
+
        device = chan->device;
        nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
                           mmc_get_dma_dir(data));
@@ -938,9 +969,14 @@ int mmci_dmae_prep_data(struct mmci_host *host,
 int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
 {
        struct mmci_dmae_priv *dmae = host->dma_priv;
+       int ret;
 
        host->dma_in_progress = true;
-       dmaengine_submit(dmae->desc_current);
+       ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
+       if (ret < 0) {
+               host->dma_in_progress = false;
+               return ret;
+       }
        dma_async_issue_pending(dmae->cur);
 
        *datactrl |= MCI_DPSM_DMAENABLE;
@@ -1321,6 +1357,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
        } else if (host->variant->busy_timeout && busy_resp &&
                   status & MCI_DATATIMEOUT) {
                cmd->error = -ETIMEDOUT;
+               host->irq_action = IRQ_WAKE_THREAD;
        } else {
                cmd->resp[0] = readl(base + MMCIRESPONSE0);
                cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1339,7 +1376,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
                                return;
                        }
                }
-               mmci_request_end(host, host->mrq);
+
+               if (host->irq_action != IRQ_WAKE_THREAD)
+                       mmci_request_end(host, host->mrq);
+
        } else if (sbc) {
                mmci_start_command(host, host->mrq->cmd, 0);
        } else if (!host->variant->datactrl_first &&
@@ -1532,9 +1572,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
 {
        struct mmci_host *host = dev_id;
        u32 status;
-       int ret = 0;
 
        spin_lock(&host->lock);
+       host->irq_action = IRQ_HANDLED;
 
        do {
                status = readl(host->base + MMCISTATUS);
@@ -1574,12 +1614,41 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
                if (host->variant->busy_detect_flag)
                        status &= ~host->variant->busy_detect_flag;
 
-               ret = 1;
        } while (status);
 
        spin_unlock(&host->lock);
 
-       return IRQ_RETVAL(ret);
+       return host->irq_action;
+}
+
+/*
+ * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
+ *
+ * A reset is needed for some variants, where a datatimeout for a R1B request
+ * causes the DPSM to stay busy (non-functional).
+ */
+static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
+{
+       struct mmci_host *host = dev_id;
+       unsigned long flags;
+
+       if (host->rst) {
+               reset_control_assert(host->rst);
+               udelay(2);
+               reset_control_deassert(host->rst);
+       }
+
+       spin_lock_irqsave(&host->lock, flags);
+       writel(host->clk_reg, host->base + MMCICLOCK);
+       writel(host->pwr_reg, host->base + MMCIPOWER);
+       writel(MCI_IRQENABLE | host->variant->start_err,
+              host->base + MMCIMASK0);
+
+       host->irq_action = IRQ_HANDLED;
+       mmci_request_end(host, host->mrq);
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       return host->irq_action;
 }
 
 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -1704,7 +1773,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
                        pinctrl_select_state(host->pinctrl, host->pins_opendrain);
                else
-                       pinctrl_select_state(host->pinctrl, host->pins_default);
+                       pinctrl_select_default_state(mmc_dev(mmc));
        }
 
        /*
@@ -1877,14 +1946,6 @@ static int mmci_probe(struct amba_device *dev,
                        goto host_free;
                }
 
-               host->pins_default = pinctrl_lookup_state(host->pinctrl,
-                                                         PINCTRL_STATE_DEFAULT);
-               if (IS_ERR(host->pins_default)) {
-                       dev_err(mmc_dev(mmc), "Can't select default pins\n");
-                       ret = PTR_ERR(host->pins_default);
-                       goto host_free;
-               }
-
                host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
                                                            MMCI_PINCTRL_STATE_OPENDRAIN);
                if (IS_ERR(host->pins_opendrain)) {
@@ -2062,17 +2123,18 @@ static int mmci_probe(struct amba_device *dev,
         * silently if these do not exist
         */
        if (!np) {
-               ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+               ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
                if (ret == -EPROBE_DEFER)
                        goto clk_disable;
 
-               ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+               ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
                if (ret == -EPROBE_DEFER)
                        goto clk_disable;
        }
 
-       ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
-                       DRIVER_NAME " (cmd)", host);
+       ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
+                                       mmci_irq_thread, IRQF_SHARED,
+                                       DRIVER_NAME " (cmd)", host);
        if (ret)
                goto clk_disable;
 
@@ -2203,7 +2265,7 @@ static int mmci_runtime_resume(struct device *dev)
                struct mmci_host *host = mmc_priv(mmc);
                clk_prepare_enable(host->clk);
                mmci_restore(host);
-               pinctrl_pm_select_default_state(dev);
+               pinctrl_select_default_state(dev);
        }
 
        return 0;
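A hedged sketch of the hardirq/thread split introduced above: the hard
handler only flags that a busy-stuck R1B command needs recovery, and the
threaded handler performs the reset and finishes the request in a context
where it may sleep. All names here are invented; only
devm_request_threaded_irq() and the IRQ_WAKE_THREAD convention are real
kernel API:

#include <linux/interrupt.h>

static bool example_needs_reset;  /* set when an R1B cmd hits DATATIMEOUT */

static irqreturn_t example_hardirq(int irq, void *dev_id)
{
	/* ...ack status and handle ordinary completions here... */
	if (example_needs_reset)
		return IRQ_WAKE_THREAD;	/* hand over to the thread */
	return IRQ_HANDLED;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/*
	 * Process context: safe to pulse the reset line, reprogram the
	 * clock/power registers and finish the stuck request.
	 */
	example_needs_reset = false;
	return IRQ_HANDLED;
}

static int example_setup_irq(struct device *dev, int irq, void *host)
{
	return devm_request_threaded_irq(dev, irq, example_hardirq,
					 example_thread_fn, IRQF_SHARED,
					 "example (cmd)", host);
}
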
index 158e123..ea6a0b5 100644 (file)
@@ -279,7 +279,11 @@ struct mmci_host;
  * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm
  * @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
  * @datactrl_mask_sdio: SDIO enable mask in datactrl register
- * @datactrl_blksz: block size in power of two
+ * @datactrl_blocksz: block size in power of two
+ * @datactrl_any_blocksz: true if any block sizes are accepted by the
+ *               hardware, such as with some SDIO traffic that sends
+ *               odd packets.
+ * @dma_power_of_2: DMA only works with blocks that are a power of 2.
  * @datactrl_first: true if data must be setup before send command
  * @datacnt_useless: true if you could not use datacnt register to read
  *                  remaining data
@@ -326,6 +330,8 @@ struct variant_data {
        unsigned int            datactrl_mask_ddrmode;
        unsigned int            datactrl_mask_sdio;
        unsigned int            datactrl_blocksz;
+       u8                      datactrl_any_blocksz:1;
+       u8                      dma_power_of_2:1;
        u8                      datactrl_first:1;
        u8                      datacnt_useless:1;
        u8                      st_sdio:1;
@@ -404,7 +410,6 @@ struct mmci_host {
        struct mmci_host_ops    *ops;
        struct variant_data     *variant;
        struct pinctrl          *pinctrl;
-       struct pinctrl_state    *pins_default;
        struct pinctrl_state    *pins_opendrain;
 
        u8                      hw_designer;
@@ -412,6 +417,7 @@ struct mmci_host {
 
        struct timer_list       timer;
        unsigned int            oldstat;
+       u32                     irq_action;
 
        /* pio stuff */
        struct sg_mapping_iter  sg_miter;
index 010fe29..7726dcf 100644 (file)
@@ -2194,8 +2194,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
        if (ret)
                goto host_free;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->base = devm_ioremap_resource(&pdev->dev, res);
+       host->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->base)) {
                ret = PTR_ERR(host->base);
                goto host_free;
index 74a0a7f..203b617 100644 (file)
@@ -696,16 +696,14 @@ static int mvsd_probe(struct platform_device *pdev)
        struct mmc_host *mmc = NULL;
        struct mvsd_host *host = NULL;
        const struct mbus_dram_target_info *dram;
-       struct resource *r;
        int ret, irq;
 
        if (!np) {
                dev_err(&pdev->dev, "no DT node\n");
                return -ENODEV;
        }
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
-       if (!r || irq < 0)
+       if (irq < 0)
                return -ENXIO;
 
        mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
@@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev)
 
        spin_lock_init(&host->lock);
 
-       host->base = devm_ioremap_resource(&pdev->dev, r);
+       host->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->base)) {
                ret = PTR_ERR(host->base);
                goto out;
index 011b59a..b3d654c 100644 (file)
@@ -1121,7 +1121,16 @@ static int mxcmci_probe(struct platform_device *pdev)
        mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
 
        if (!host->pdata) {
-               host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
+               host->dma = dma_request_chan(&pdev->dev, "rx-tx");
+               if (IS_ERR(host->dma)) {
+                       if (PTR_ERR(host->dma) == -EPROBE_DEFER) {
+                               ret = -EPROBE_DEFER;
+                               goto out_clk_put;
+                       }
+
+                       /* Ignore errors to fall back to PIO mode */
+                       host->dma = NULL;
+               }
        } else {
                res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
                if (res) {
index 4031217..d82674a 100644 (file)
@@ -623,11 +623,11 @@ static int mxs_mmc_probe(struct platform_device *pdev)
                goto out_clk_disable;
        }
 
-       ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
-       if (!ssp->dmach) {
+       ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+       if (IS_ERR(ssp->dmach)) {
                dev_err(mmc_dev(host->mmc),
                        "%s: failed to request dma\n", __func__);
-               ret = -ENODEV;
+               ret = PTR_ERR(ssp->dmach);
                goto out_clk_disable;
        }
 
index 767e964..a379c45 100644 (file)
@@ -1605,12 +1605,6 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
                        ret = PTR_ERR(p);
                        goto err_free_irq;
                }
-               if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
-                       dev_info(host->dev, "missing default pinctrl state\n");
-                       devm_pinctrl_put(p);
-                       ret = -EINVAL;
-                       goto err_free_irq;
-               }
 
                if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
                        dev_info(host->dev, "missing idle pinctrl state\n");
@@ -2153,14 +2147,14 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
        if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
            (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
 
-               pinctrl_pm_select_default_state(host->dev);
+               pinctrl_select_default_state(host->dev);
 
                /* irq lost, if pinmux incorrect */
                OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
                OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
                OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
        } else {
-               pinctrl_pm_select_default_state(host->dev);
+               pinctrl_select_default_state(host->dev);
        }
        spin_unlock_irqrestore(&host->irq_lock, flags);
        return 0;
index 771e3d0..01ffe51 100644 (file)
@@ -616,10 +616,10 @@ static int owl_mmc_probe(struct platform_device *pdev)
 
        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc");
-       if (!owl_host->dma) {
+       owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
+       if (IS_ERR(owl_host->dma)) {
                dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
-               ret = -ENXIO;
+               ret = PTR_ERR(owl_host->dma);
                goto err_free_host;
        }
 
index 024acc1..3a93334 100644 (file)
@@ -710,17 +710,19 @@ static int pxamci_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, mmc);
 
-       host->dma_chan_rx = dma_request_slave_channel(dev, "rx");
-       if (host->dma_chan_rx == NULL) {
+       host->dma_chan_rx = dma_request_chan(dev, "rx");
+       if (IS_ERR(host->dma_chan_rx)) {
                dev_err(dev, "unable to request rx dma channel\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(host->dma_chan_rx);
+               host->dma_chan_rx = NULL;
                goto out;
        }
 
-       host->dma_chan_tx = dma_request_slave_channel(dev, "tx");
-       if (host->dma_chan_tx == NULL) {
+       host->dma_chan_tx = dma_request_chan(dev, "tx");
+       if (IS_ERR(host->dma_chan_tx)) {
                dev_err(dev, "unable to request tx dma channel\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(host->dma_chan_tx);
+               host->dma_chan_tx = NULL;
                goto out;
        }
 
@@ -734,22 +736,22 @@ static int pxamci_probe(struct platform_device *pdev)
                }
 
                /* FIXME: should we pass detection delay to debounce? */
-               ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+               ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
                if (ret && ret != -ENOENT) {
                        dev_err(dev, "Failed requesting gpio_cd\n");
                        goto out;
                }
 
-               ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+               if (!host->pdata->gpio_card_ro_invert)
+                       mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+               ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
                if (ret && ret != -ENOENT) {
                        dev_err(dev, "Failed requesting gpio_ro\n");
                        goto out;
                }
-               if (!ret) {
+               if (!ret)
                        host->use_ro_gpio = true;
-                       mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
-                               0 : MMC_CAP2_RO_ACTIVE_HIGH;
-               }
 
                if (host->pdata->init)
                        host->pdata->init(dev, pxamci_detect_irq, mmc);
index c0504aa..f524251 100644 (file)
@@ -14,8 +14,8 @@
 
 struct renesas_sdhi_scc {
        unsigned long clk_rate; /* clock rate for SDR104 */
-       u32 tap;                /* sampling clock position for SDR104 */
-       u32 tap_hs400;          /* sampling clock position for HS400 */
+       u32 tap;                /* sampling clock position for SDR104/HS400 (8 TAP) */
+       u32 tap_hs400_4tap;     /* sampling clock position for HS400 (4 TAP) */
 };
 
 struct renesas_sdhi_of_data {
@@ -33,6 +33,11 @@ struct renesas_sdhi_of_data {
        unsigned short max_segs;
 };
 
+struct renesas_sdhi_quirks {
+       bool hs400_disabled;
+       bool hs400_4taps;
+};
+
 struct tmio_mmc_dma {
        enum dma_slave_buswidth dma_buswidth;
        bool (*filter)(struct dma_chan *chan, void *arg);
@@ -46,6 +51,7 @@ struct renesas_sdhi {
        struct clk *clk_cd;
        struct tmio_mmc_data mmc_data;
        struct tmio_mmc_dma dma_priv;
+       const struct renesas_sdhi_quirks *quirks;
        struct pinctrl *pinctrl;
        struct pinctrl_state *pins_default, *pins_uhs;
        void __iomem *scc_ctl;
index 234551a..35cb24c 100644 (file)
 #define SDHI_VER_GEN3_SD       0xcc10
 #define SDHI_VER_GEN3_SDMMC    0xcd10
 
-struct renesas_sdhi_quirks {
-       bool hs400_disabled;
-       bool hs400_4taps;
-};
-
 static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
 {
        u32 val;
@@ -355,7 +350,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
                       0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
 
 
-       if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400)
+       if (priv->quirks && priv->quirks->hs400_4taps)
                sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
                               host->tap_set / 2);
 
@@ -493,7 +488,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
 static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
 {
        struct renesas_sdhi *priv = host_to_priv(host);
-       bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400;
+       bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
 
        /*
         * Skip checking SCC errors when running on 4 taps in HS400 mode as
@@ -627,10 +622,10 @@ static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
 };
 
 static const struct soc_device_attribute sdhi_quirks_match[]  = {
+       { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
        { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
-       { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
        { /* Sentinel. */ },
 };
@@ -665,6 +660,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        if (!priv)
                return -ENOMEM;
 
+       priv->quirks = quirks;
        mmc_data = &priv->mmc_data;
        dma_priv = &priv->dma_priv;
 
@@ -724,9 +720,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        if (quirks && quirks->hs400_disabled)
                host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
 
-       if (quirks && quirks->hs400_4taps)
-               mmc_data->flags |= TMIO_MMC_HAVE_4TAP_HS400;
-
        /* For some SoC, we disable internal WP. GPIO may override this */
        if (mmc_can_gpio_ro(host->mmc))
                mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
@@ -800,20 +793,23 @@ int renesas_sdhi_probe(struct platform_device *pdev,
             host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
                                 MMC_CAP2_HS400_1_8V))) {
                const struct renesas_sdhi_scc *taps = of_data->taps;
+               bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
                bool hit = false;
 
                for (i = 0; i < of_data->taps_num; i++) {
                        if (taps[i].clk_rate == 0 ||
                            taps[i].clk_rate == host->mmc->f_max) {
                                priv->scc_tappos = taps->tap;
-                               priv->scc_tappos_hs400 = taps->tap_hs400;
+                               priv->scc_tappos_hs400 = use_4tap ?
+                                                        taps->tap_hs400_4tap :
+                                                        taps->tap;
                                hit = true;
                                break;
                        }
                }
 
                if (!hit)
-                       dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
+                       dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
 
                host->init_tuning = renesas_sdhi_init_tuning;
                host->prepare_tuning = renesas_sdhi_prepare_tuning;
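Moving the quirks from a TMIO flag bit to a pointer held in the private data
follows the soc_device_match() pattern also used by the internal-DMAC file
below. A reduced sketch, with the table contents assumed from the hunks
above:

#include <linux/sys_soc.h>

struct example_quirks { bool hs400_4taps; };

static const struct example_quirks example_4tap = { .hs400_4taps = true };

static const struct soc_device_attribute example_matches[] = {
	{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &example_4tap },
	{ /* sentinel */ }
};

static const struct example_quirks *example_lookup_quirks(void)
{
	const struct soc_device_attribute *attr;

	attr = soc_device_match(example_matches);
	return attr ? attr->data : NULL;	/* NULL: no quirks apply */
}
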
index 18839a1..47ac53e 100644 (file)
@@ -82,7 +82,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
        {
                .clk_rate = 0,
                .tap = 0x00000300,
-               .tap_hs400 = 0x00000704,
+               .tap_hs400_4tap = 0x00000100,
        },
 };
 
@@ -298,38 +298,23 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
  * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC
  * implementation as others may use a different implementation.
  */
-static const struct soc_device_attribute soc_whitelist[] = {
-       /* specific ones */
+static const struct soc_device_attribute soc_dma_quirks[] = {
        { .soc_id = "r7s9210",
          .data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) },
        { .soc_id = "r8a7795", .revision = "ES1.*",
          .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
        { .soc_id = "r8a7796", .revision = "ES1.0",
          .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
-       /* generic ones */
-       { .soc_id = "r8a774a1" },
-       { .soc_id = "r8a774b1" },
-       { .soc_id = "r8a774c0" },
-       { .soc_id = "r8a77470" },
-       { .soc_id = "r8a7795" },
-       { .soc_id = "r8a7796" },
-       { .soc_id = "r8a77965" },
-       { .soc_id = "r8a77970" },
-       { .soc_id = "r8a77980" },
-       { .soc_id = "r8a77990" },
-       { .soc_id = "r8a77995" },
        { /* sentinel */ }
 };
 
 static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
 {
-       const struct soc_device_attribute *soc = soc_device_match(soc_whitelist);
+       const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks);
        struct device *dev = &pdev->dev;
 
-       if (!soc)
-               return -ENODEV;
-
-       global_flags |= (unsigned long)soc->data;
+       if (soc)
+               global_flags |= (unsigned long)soc->data;
 
        dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
        if (!dev->dma_parms)
index bce9c33..1e616ae 100644 (file)
@@ -1505,14 +1505,14 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host)
                mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 
        /* If we get -ENOENT we have no card detect GPIO line */
-       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
        if (ret != -ENOENT) {
                dev_err(&pdev->dev, "error requesting GPIO for CD %d\n",
                        ret);
                return ret;
        }
 
-       ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+       ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
        if (ret != -ENOENT) {
                dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
                        ret);
index 105e73d..dd90857 100644 (file)
@@ -752,7 +752,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
        if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
                bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
 
-               err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL);
+               err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
                if (err) {
                        if (err == -EPROBE_DEFER)
                                goto err_free;
index 73bb440..ad01f64 100644 (file)
 #include <linux/mmc/host.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
 
 #include "sdhci-pltfm.h"
+#include "cqhci.h"
 
-static const struct sdhci_ops sdhci_brcmstb_ops = {
+#define SDHCI_VENDOR 0x78
+#define  SDHCI_VENDOR_ENHANCED_STRB 0x1
+
+#define BRCMSTB_PRIV_FLAGS_NO_64BIT            BIT(0)
+#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT      BIT(1)
+
+#define SDHCI_ARASAN_CQE_BASE_ADDR             0x200
+
+struct sdhci_brcmstb_priv {
+       void __iomem *cfg_regs;
+       bool has_cqe;
+};
+
+struct brcmstb_match_priv {
+       void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios);
+       struct sdhci_ops *ops;
+       unsigned int flags;
+};
+
+static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       struct sdhci_host *host = mmc_priv(mmc);
+       u32 reg;
+
+       dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n",
+               __func__);
+       reg = readl(host->ioaddr + SDHCI_VENDOR);
+       if (ios->enhanced_strobe)
+               reg |= SDHCI_VENDOR_ENHANCED_STRB;
+       else
+               reg &= ~SDHCI_VENDOR_ENHANCED_STRB;
+       writel(reg, host->ioaddr + SDHCI_VENDOR);
+}
+
+static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+       u16 clk;
+
+       host->mmc->actual_clock = 0;
+
+       clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+       sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+       if (clock == 0)
+               return;
+
+       sdhci_enable_clk(host, clk);
+}
+
+static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host,
+                                           unsigned int timing)
+{
+       u16 ctrl_2;
+
+       dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n",
+               __func__, timing);
+       ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+       /* Select Bus Speed Mode for host */
+       ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+       if ((timing == MMC_TIMING_MMC_HS200) ||
+           (timing == MMC_TIMING_UHS_SDR104))
+               ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+       else if (timing == MMC_TIMING_UHS_SDR12)
+               ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+       else if (timing == MMC_TIMING_SD_HS ||
+                timing == MMC_TIMING_MMC_HS ||
+                timing == MMC_TIMING_UHS_SDR25)
+               ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+       else if (timing == MMC_TIMING_UHS_SDR50)
+               ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+       else if ((timing == MMC_TIMING_UHS_DDR50) ||
+                (timing == MMC_TIMING_MMC_DDR52))
+               ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+       else if (timing == MMC_TIMING_MMC_HS400)
+               ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
+       sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc)
+{
+       sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       u32 reg;
+
+       reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+       while (reg & SDHCI_DATA_AVAILABLE) {
+               sdhci_readl(host, SDHCI_BUFFER);
+               reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+       }
+
+       sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = {
+       .enable         = sdhci_brcmstb_cqe_enable,
+       .disable        = sdhci_cqe_disable,
+       .dumpregs       = sdhci_brcmstb_dumpregs,
+};
+
+static struct sdhci_ops sdhci_brcmstb_ops = {
        .set_clock = sdhci_set_clock,
        .set_bus_width = sdhci_set_bus_width,
        .reset = sdhci_reset,
        .set_uhs_signaling = sdhci_set_uhs_signaling,
 };
 
-static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = {
+static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
+       .set_clock = sdhci_brcmstb_set_clock,
+       .set_bus_width = sdhci_set_bus_width,
+       .reset = sdhci_reset,
+       .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
+};
+
+static struct brcmstb_match_priv match_priv_7425 = {
+       .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT |
+                BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
        .ops = &sdhci_brcmstb_ops,
 };
 
+static struct brcmstb_match_priv match_priv_7445 = {
+       .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
+       .ops = &sdhci_brcmstb_ops,
+};
+
+static const struct brcmstb_match_priv match_priv_7216 = {
+       .hs400es = sdhci_brcmstb_hs400es,
+       .ops = &sdhci_brcmstb_ops_7216,
+};
+
+static const struct of_device_id sdhci_brcm_of_match[] = {
+       { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 },
+       { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 },
+       { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 },
+       {},
+};
+
+static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+       int cmd_error = 0;
+       int data_error = 0;
+
+       if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+               return intmask;
+
+       cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+       return 0;
+}
+
+static int sdhci_brcmstb_add_host(struct sdhci_host *host,
+                                 struct sdhci_brcmstb_priv *priv)
+{
+       struct cqhci_host *cq_host;
+       bool dma64;
+       int ret;
+
+       if (!priv->has_cqe)
+               return sdhci_add_host(host);
+
+       dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n");
+       host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+       ret = sdhci_setup_host(host);
+       if (ret)
+               return ret;
+
+       cq_host = devm_kzalloc(mmc_dev(host->mmc),
+                              sizeof(*cq_host), GFP_KERNEL);
+       if (!cq_host) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+       cq_host->ops = &sdhci_brcmstb_cqhci_ops;
+
+       dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+       if (dma64) {
+               dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
+               cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+               cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+       }
+
+       ret = cqhci_init(cq_host, host->mmc, dma64);
+       if (ret)
+               goto cleanup;
+
+       ret = __sdhci_add_host(host);
+       if (ret)
+               goto cleanup;
+
+       return 0;
+
+cleanup:
+       sdhci_cleanup_host(host);
+       return ret;
+}
+
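sdhci_brcmstb_add_host() above follows the canonical CQHCI bring-up order, which sdhci-msm further down repeats: sdhci_setup_host() first (so host->flags such as SDHCI_USE_64_BIT_DMA are valid), then cqhci_init(), then __sdhci_add_host(), with sdhci_cleanup_host() unwinding any failure after setup. A condensed sketch of the pattern; my_add_host() is a placeholder name, not code from this series:

        static int my_add_host(struct sdhci_host *host, struct cqhci_host *cq_host)
        {
                bool dma64;
                int ret;

                ret = sdhci_setup_host(host);   /* read caps, allocate */
                if (ret)
                        return ret;

                dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
                ret = cqhci_init(cq_host, host->mmc, dma64);
                if (ret)
                        goto cleanup;

                ret = __sdhci_add_host(host);   /* register with the MMC core */
                if (ret)
                        goto cleanup;

                return 0;

        cleanup:
                sdhci_cleanup_host(host);       /* undo sdhci_setup_host() */
                return ret;
        }
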
 static int sdhci_brcmstb_probe(struct platform_device *pdev)
 {
-       struct sdhci_host *host;
+       const struct brcmstb_match_priv *match_priv;
+       struct sdhci_pltfm_data brcmstb_pdata;
        struct sdhci_pltfm_host *pltfm_host;
+       const struct of_device_id *match;
+       struct sdhci_brcmstb_priv *priv;
+       struct sdhci_host *host;
+       struct resource *iomem;
+       bool has_cqe = false;
        struct clk *clk;
        int res;
 
+       match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
+       match_priv = match->data;
+
+       dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible);
+
        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
+               if (PTR_ERR(clk) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
                dev_err(&pdev->dev, "Clock not found in Device Tree\n");
                clk = NULL;
        }
@@ -39,36 +246,64 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
        if (res)
                return res;
 
-       host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0);
+       memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata));
+       if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+               has_cqe = true;
+               match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
+       }
+       brcmstb_pdata.ops = match_priv->ops;
+       host = sdhci_pltfm_init(pdev, &brcmstb_pdata,
+                               sizeof(struct sdhci_brcmstb_priv));
        if (IS_ERR(host)) {
                res = PTR_ERR(host);
                goto err_clk;
        }
 
+       pltfm_host = sdhci_priv(host);
+       priv = sdhci_pltfm_priv(pltfm_host);
+       priv->has_cqe = has_cqe;
+
+       /* Map in the non-standard CFG registers */
+       iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem);
+       if (IS_ERR(priv->cfg_regs)) {
+               res = PTR_ERR(priv->cfg_regs);
+               goto err;
+       }
+
        sdhci_get_of_property(pdev);
        res = mmc_of_parse(host->mmc);
        if (res)
                goto err;
 
+       /*
+        * If the chip supports enhanced strobe and it is enabled,
+        * hook up the callback.
+        */
+       if (match_priv->hs400es &&
+           (host->mmc->caps2 & MMC_CAP2_HS400_ES))
+               host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es;
+
        /*
         * Supply the existing CAPS, but clear the UHS modes. This
         * will allow these modes to be specified by device tree
         * properties through mmc_of_parse().
         */
        host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
-       if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci"))
+       if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT)
                host->caps &= ~SDHCI_CAN_64BIT;
        host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
        host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
-                       SDHCI_SUPPORT_DDR50);
-       host->quirks |= SDHCI_QUIRK_MISSING_CAPS |
-               SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+                        SDHCI_SUPPORT_DDR50);
+       host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+
+       if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT)
+               host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
 
-       res = sdhci_add_host(host);
+       res = sdhci_brcmstb_add_host(host, priv);
        if (res)
                goto err;
 
-       pltfm_host = sdhci_priv(host);
        pltfm_host->clk = clk;
        return res;
 
@@ -79,11 +314,15 @@ err_clk:
        return res;
 }
 
-static const struct of_device_id sdhci_brcm_of_match[] = {
-       { .compatible = "brcm,bcm7425-sdhci" },
-       { .compatible = "brcm,bcm7445-sdhci" },
-       {},
-};
+static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
+{
+       int ret;
+
+       ret = sdhci_pltfm_unregister(pdev);
+       if (ret)
+               dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
 MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
 
 static struct platform_driver sdhci_brcmstb_driver = {
@@ -94,6 +333,7 @@ static struct platform_driver sdhci_brcmstb_driver = {
        },
        .probe          = sdhci_brcmstb_probe,
        .remove         = sdhci_pltfm_unregister,
+       .shutdown       = sdhci_brcmstb_shutdown,
 };
 
 module_platform_driver(sdhci_brcmstb_driver);
index ae0ec27..5827d37 100644 (file)
@@ -158,7 +158,7 @@ static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
        return 0;
 }
 
-static inline void *sdhci_cdns_priv(struct sdhci_host *host)
+static void *sdhci_cdns_priv(struct sdhci_host *host)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 
index 1c988d6..382f25b 100644 (file)
@@ -224,7 +224,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
 struct pltfm_imx_data {
        u32 scratchpad;
        struct pinctrl *pinctrl;
-       struct pinctrl_state *pins_default;
        struct pinctrl_state *pins_100mhz;
        struct pinctrl_state *pins_200mhz;
        const struct esdhc_soc_data *socdata;
@@ -951,7 +950,6 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
        dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
 
        if (IS_ERR(imx_data->pinctrl) ||
-               IS_ERR(imx_data->pins_default) ||
                IS_ERR(imx_data->pins_100mhz) ||
                IS_ERR(imx_data->pins_200mhz))
                return -EINVAL;
@@ -968,7 +966,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
                break;
        default:
                /* back to default state for other legacy timing */
-               pinctrl = imx_data->pins_default;
+               return pinctrl_select_default_state(mmc_dev(host->mmc));
        }
 
        return pinctrl_select_state(imx_data->pinctrl, pinctrl);
@@ -1338,7 +1336,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
 
        mmc_of_parse_voltage(np, &host->ocr_mask);
 
-       if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) {
+       if (esdhc_is_usdhc(imx_data)) {
                imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
                                                ESDHC_PINCTRL_STATE_100MHZ);
                imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
@@ -1381,19 +1379,20 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
                                host->mmc->parent->platform_data);
        /* write_protect */
        if (boarddata->wp_type == ESDHC_WP_GPIO) {
-               err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+               host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+               err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
                if (err) {
                        dev_err(mmc_dev(host->mmc),
                                "failed to request write-protect gpio!\n");
                        return err;
                }
-               host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
        }
 
        /* card_detect */
        switch (boarddata->cd_type) {
        case ESDHC_CD_GPIO:
-               err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+               err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
                if (err) {
                        dev_err(mmc_dev(host->mmc),
                                "failed to request card-detect gpio!\n");
@@ -1492,11 +1491,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
                goto disable_ahb_clk;
        }
 
-       imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
-                                               PINCTRL_STATE_DEFAULT);
-       if (IS_ERR(imx_data->pins_default))
-               dev_warn(mmc_dev(host->mmc), "could not get default state\n");
-
        if (esdhc_is_usdhc(imx_data)) {
                host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
                host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
index a1aa21b..92f30a1 100644 (file)
@@ -242,15 +242,12 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
 {
        struct sdhci_host *host;
        struct device *dev = &pdev->dev;
-       struct resource *res;
        int irq, ret = 0;
        struct f_sdhost_priv *priv;
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "%s: no irq specified\n", __func__);
+       if (irq < 0)
                return irq;
-       }
 
        host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
        if (IS_ERR(host))
@@ -280,8 +277,7 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
        host->ops = &sdhci_milbeaut_ops;
        host->irq = irq;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+       host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->ioaddr)) {
                ret = PTR_ERR(host->ioaddr);
                goto err;
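This is one of several conversions in the series (sdhci-msm, sdhci-s3c and sdhci-spear below get the same treatment) from the two-step platform_get_resource() + devm_ioremap_resource() to the devm_platform_ioremap_resource() helper; the two forms are equivalent, including the ERR_PTR error reporting:

        /* before */
        res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);

        /* after: one call, same semantics */
        base = devm_platform_ioremap_resource(pdev, 0);
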
index 3d0bb5e..c3a160c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/regulator/consumer.h>
 
 #include "sdhci-pltfm.h"
+#include "cqhci.h"
 
 #define CORE_MCI_VERSION               0x50
 #define CORE_VERSION_MAJOR_SHIFT       28
 #define msm_host_writel(msm_host, val, host, offset) \
        msm_host->var_ops->msm_writel_relaxed(val, host, offset)
 
+/* CQHCI vendor specific registers */
+#define CQHCI_VENDOR_CFG1      0xA00
+#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN  (0x3 << 13)
+
 struct sdhci_msm_offset {
        u32 core_hc_mode;
        u32 core_mci_data_cnt;
@@ -1567,6 +1572,127 @@ out:
        __sdhci_msm_set_clock(host, clock);
 }
 
+/*****************************************************************************\
+ *                                                                           *
+ * MSM Command Queue Engine (CQE)                                            *
+ *                                                                           *
+\*****************************************************************************/
+
+static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
+{
+       int cmd_error = 0;
+       int data_error = 0;
+
+       if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+               return intmask;
+
+       cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+       return 0;
+}
+
+static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       unsigned long flags;
+       u32 ctrl;
+
+       /*
+        * When CQE is halted, the legacy SDHCI path operates only
+        * on 16-byte descriptors in 64-bit mode.
+        */
+       if (host->flags & SDHCI_USE_64_BIT_DMA)
+               host->desc_sz = 16;
+
+       spin_lock_irqsave(&host->lock, flags);
+
+       /*
+        * During CQE command transfers, command complete bit gets latched.
+        * So s/w should clear command complete interrupt status when CQE is
+        * either halted or disabled. Otherwise an unexpected SDHCI legacy
+        * interrupt gets triggered when CQE is halted/disabled.
+        */
+       ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
+       ctrl |= SDHCI_INT_RESPONSE;
+       sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
+       sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       sdhci_cqe_disable(mmc, recovery);
+}
+
+static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
+       .enable         = sdhci_cqe_enable,
+       .disable        = sdhci_msm_cqe_disable,
+};
+
+static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
+                               struct platform_device *pdev)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+       struct cqhci_host *cq_host;
+       bool dma64;
+       u32 cqcfg;
+       int ret;
+
+       /*
+        * When CQE is halted, the SDHC operates only on 16-byte ADMA
+        * descriptors, so ensure the ADMA table is allocated for
+        * 16-byte descriptors.
+        */
+       if (host->caps & SDHCI_CAN_64BIT)
+               host->alloc_desc_sz = 16;
+
+       ret = sdhci_setup_host(host);
+       if (ret)
+               return ret;
+
+       cq_host = cqhci_pltfm_init(pdev);
+       if (IS_ERR(cq_host)) {
+               ret = PTR_ERR(cq_host);
+               dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
+               goto cleanup;
+       }
+
+       msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+       cq_host->ops = &sdhci_msm_cqhci_ops;
+
+       dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+
+       ret = cqhci_init(cq_host, host->mmc, dma64);
+       if (ret) {
+               dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
+                               mmc_hostname(host->mmc), ret);
+               goto cleanup;
+       }
+
+       /* Disable CQE reset triggered by the CQE enable signal */
+       cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
+       cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
+       cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
+
+       /*
+        * The SDHC expects 12-byte ADMA descriptors until CQE is enabled,
+        * so limit desc_sz to 12 so that data commands sent during card
+        * initialization (before CQE gets enabled) execute without issues.
+        */
+       if (host->flags & SDHCI_USE_64_BIT_DMA)
+               host->desc_sz = 12;
+
+       ret = __sdhci_add_host(host);
+       if (ret)
+               goto cleanup;
+
+       dev_info(&pdev->dev, "%s: CQE init: success\n",
+                       mmc_hostname(host->mmc));
+       return ret;
+
+cleanup:
+       sdhci_cleanup_host(host);
+       return ret;
+}
+
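Taken together, the three descriptor-size assignments in this file give 64-bit-DMA hosts the following lifecycle; this is a summary of the hunks above, not new code:

        /*
         * ADMA descriptor sizing around CQE on sdhci-msm:
         *
         *   cqe_add_host:  alloc_desc_sz = 16  table allocated for the largest
         *                                      descriptor the host will use
         *   until CQE on:  desc_sz = 12        legacy commands during card init
         *                                      use 12-byte descriptors
         *   cqe_disable:   desc_sz = 16        the legacy path under a halted
         *                                      CQE runs on 16-byte descriptors
         */
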
 /*
  * Platform specific register write functions. This is so that, if any
  * register write needs to be followed up by platform specific actions,
@@ -1731,6 +1857,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
        .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
        .write_w = sdhci_msm_writew,
        .write_b = sdhci_msm_writeb,
+       .irq    = sdhci_msm_cqe_irq,
 };
 
 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1746,7 +1873,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        struct sdhci_host *host;
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_msm_host *msm_host;
-       struct resource *core_memres;
        struct clk *clk;
        int ret;
        u16 host_version, core_minor;
@@ -1754,6 +1880,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        u8 core_major;
        const struct sdhci_msm_offset *msm_offset;
        const struct sdhci_msm_variant_info *var_info;
+       struct device_node *node = pdev->dev.of_node;
 
        host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
        if (IS_ERR(host))
@@ -1847,10 +1974,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        }
 
        if (!msm_host->mci_removed) {
-               core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
-                               core_memres);
-
+               msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
                if (IS_ERR(msm_host->core_mem)) {
                        ret = PTR_ERR(msm_host->core_mem);
                        goto clk_disable;
@@ -1952,7 +2076,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        pm_runtime_use_autosuspend(&pdev->dev);
 
        host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
-       ret = sdhci_add_host(host);
+       if (of_property_read_bool(node, "supports-cqe"))
+               ret = sdhci_msm_cqe_add_host(host, pdev);
+       else
+               ret = sdhci_add_host(host);
        if (ret)
                goto pm_runtime_disable;
        sdhci_msm_set_regulator_caps(msm_host);
index 5959e39..ab2bd31 100644 (file)
 
 #define SDHCI_AT91_PRESET_COMMON_CONF  0x400 /* drv type B, programmable clock mode */
 
+struct sdhci_at91_soc_data {
+       const struct sdhci_pltfm_data *pdata;
+       bool baseclk_is_generated_internally;
+       unsigned int divider_for_baseclk;
+};
+
 struct sdhci_at91_priv {
+       const struct sdhci_at91_soc_data *soc_data;
        struct clk *hclock;
        struct clk *gck;
        struct clk *mainck;
@@ -141,12 +148,24 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
        .set_power              = sdhci_at91_set_power,
 };
 
-static const struct sdhci_pltfm_data soc_data_sama5d2 = {
+static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = {
        .ops = &sdhci_at91_sama5d2_ops,
 };
 
+static const struct sdhci_at91_soc_data soc_data_sama5d2 = {
+       .pdata = &sdhci_sama5d2_pdata,
+       .baseclk_is_generated_internally = false,
+};
+
+static const struct sdhci_at91_soc_data soc_data_sam9x60 = {
+       .pdata = &sdhci_sama5d2_pdata,
+       .baseclk_is_generated_internally = true,
+       .divider_for_baseclk = 2,
+};
+
 static const struct of_device_id sdhci_at91_dt_match[] = {
        { .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
+       { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 },
        {}
 };
 MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match);
@@ -156,50 +175,37 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
        struct sdhci_host *host = dev_get_drvdata(dev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
-       int ret;
        unsigned int                    caps0, caps1;
        unsigned int                    clk_base, clk_mul;
-       unsigned int                    gck_rate, real_gck_rate;
+       unsigned int                    gck_rate, clk_base_rate;
        unsigned int                    preset_div;
 
-       /*
-        * The mult clock is provided by as a generated clock by the PMC
-        * controller. In order to set the rate of gck, we have to get the
-        * base clock rate and the clock mult from capabilities.
-        */
        clk_prepare_enable(priv->hclock);
        caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES);
        caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1);
-       clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
-       clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT;
-       gck_rate = clk_base * 1000000 * (clk_mul + 1);
-       ret = clk_set_rate(priv->gck, gck_rate);
-       if (ret < 0) {
-               dev_err(dev, "failed to set gck");
-               clk_disable_unprepare(priv->hclock);
-               return ret;
-       }
-       /*
-        * We need to check if we have the requested rate for gck because in
-        * some cases this rate could be not supported. If it happens, the rate
-        * is the closest one gck can provide. We have to update the value
-        * of clk mul.
-        */
-       real_gck_rate = clk_get_rate(priv->gck);
-       if (real_gck_rate != gck_rate) {
-               clk_mul = real_gck_rate / (clk_base * 1000000) - 1;
-               caps1 &= (~SDHCI_CLOCK_MUL_MASK);
-               caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) &
-                         SDHCI_CLOCK_MUL_MASK);
-               /* Set capabilities in r/w mode. */
-               writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN,
-                      host->ioaddr + SDMMC_CACR);
-               writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
-               /* Set capabilities in ro mode. */
-               writel(0, host->ioaddr + SDMMC_CACR);
-               dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n",
-                        clk_mul, real_gck_rate);
-       }
+
+       gck_rate = clk_get_rate(priv->gck);
+       if (priv->soc_data->baseclk_is_generated_internally)
+               clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk;
+       else
+               clk_base_rate = clk_get_rate(priv->mainck);
+
+       clk_base = clk_base_rate / 1000000;
+       clk_mul = gck_rate / clk_base_rate - 1;
+
+       caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK;
+       caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK;
+       caps1 &= ~SDHCI_CLOCK_MUL_MASK;
+       caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK;
+       /* Set capabilities in r/w mode. */
+       writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
+       writel(caps0, host->ioaddr + SDHCI_CAPABILITIES);
+       writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
+       /* Set capabilities in ro mode. */
+       writel(0, host->ioaddr + SDMMC_CACR);
+
+       dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
+                clk_mul, gck_rate, clk_base_rate);
 
        /*
         * We have to set preset values because it depends on the clk_mul
@@ -207,19 +213,19 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
         * maximum sd clock value is 120 MHz instead of 208 MHz. For that
         * reason, we need to use presets to support SDR104.
         */
-       preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1;
+       preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1;
        writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
               host->ioaddr + SDHCI_PRESET_FOR_SDR12);
-       preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+       preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
        writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
               host->ioaddr + SDHCI_PRESET_FOR_SDR25);
-       preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1;
+       preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1;
        writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
               host->ioaddr + SDHCI_PRESET_FOR_SDR50);
-       preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1;
+       preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1;
        writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
               host->ioaddr + SDHCI_PRESET_FOR_SDR104);
-       preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+       preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
        writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
               host->ioaddr + SDHCI_PRESET_FOR_DDR50);
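A worked pass through the math above, assuming an illustrative sam9x60-style gck of 240 MHz (not a datasheet value): clk_base_rate = 240 / 2 = 120 MHz, so clk_base = 120 and clk_mul = 240 / 120 - 1 = 1, and the preset divisors come out as:

        /*
         * preset_div = DIV_ROUND_UP(gck_rate, target) - 1, gck = 240 MHz:
         *
         *   SDR12  (24 MHz):   ceil(240 / 24)  - 1 = 9
         *   SDR25  (50 MHz):   ceil(240 / 50)  - 1 = 4
         *   SDR50  (100 MHz):  ceil(240 / 100) - 1 = 2
         *   SDR104 (120 MHz):  ceil(240 / 120) - 1 = 1
         *   DDR50  (50 MHz):   ceil(240 / 50)  - 1 = 4
         */
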
 
@@ -314,7 +320,7 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
 static int sdhci_at91_probe(struct platform_device *pdev)
 {
        const struct of_device_id       *match;
-       const struct sdhci_pltfm_data   *soc_data;
+       const struct sdhci_at91_soc_data        *soc_data;
        struct sdhci_host               *host;
        struct sdhci_pltfm_host         *pltfm_host;
        struct sdhci_at91_priv          *priv;
@@ -325,29 +331,37 @@ static int sdhci_at91_probe(struct platform_device *pdev)
                return -EINVAL;
        soc_data = match->data;
 
-       host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv));
+       host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv));
        if (IS_ERR(host))
                return PTR_ERR(host);
 
        pltfm_host = sdhci_priv(host);
        priv = sdhci_pltfm_priv(pltfm_host);
+       priv->soc_data = soc_data;
 
        priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
        if (IS_ERR(priv->mainck)) {
-               dev_err(&pdev->dev, "failed to get baseclk\n");
-               return PTR_ERR(priv->mainck);
+               if (soc_data->baseclk_is_generated_internally) {
+                       priv->mainck = NULL;
+               } else {
+                       dev_err(&pdev->dev, "failed to get baseclk\n");
+                       ret = PTR_ERR(priv->mainck);
+                       goto sdhci_pltfm_free;
+               }
        }
 
        priv->hclock = devm_clk_get(&pdev->dev, "hclock");
        if (IS_ERR(priv->hclock)) {
                dev_err(&pdev->dev, "failed to get hclock\n");
-               return PTR_ERR(priv->hclock);
+               ret = PTR_ERR(priv->hclock);
+               goto sdhci_pltfm_free;
        }
 
        priv->gck = devm_clk_get(&pdev->dev, "multclk");
        if (IS_ERR(priv->gck)) {
                dev_err(&pdev->dev, "failed to get multclk\n");
-               return PTR_ERR(priv->gck);
+               ret = PTR_ERR(priv->gck);
+               goto sdhci_pltfm_free;
        }
 
        ret = sdhci_at91_set_clks_presets(&pdev->dev);
index 500f70a..5d8dd87 100644 (file)
@@ -173,6 +173,9 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host,
        u16 ret;
        int shift = (spec_reg & 0x2) * 8;
 
+       if (spec_reg == SDHCI_TRANSFER_MODE)
+               return pltfm_host->xfer_mode_shadow;
+
        if (spec_reg == SDHCI_HOST_VERSION)
                ret = value & 0xffff;
        else
@@ -562,32 +565,46 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
 
 static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
 {
-       u32 val;
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        ktime_t timeout;
+       u32 val, clk_en;
+
+       clk_en = ESDHC_CLOCK_SDCLKEN;
+
+       /*
+        * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
+        * is 2.2 or lower.
+        */
+       if (esdhc->vendor_ver <= VENDOR_V_22)
+               clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
+                          ESDHC_CLOCK_PEREN);
 
        val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
 
        if (enable)
-               val |= ESDHC_CLOCK_SDCLKEN;
+               val |= clk_en;
        else
-               val &= ~ESDHC_CLOCK_SDCLKEN;
+               val &= ~clk_en;
 
        sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
 
-       /* Wait max 20 ms */
+       /*
+        * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
+        * wait for the clock stable bit, which does not exist.
+        */
        timeout = ktime_add_ms(ktime_get(), 20);
-       val = ESDHC_CLOCK_STABLE;
-       while (1) {
+       while (esdhc->vendor_ver > VENDOR_V_22) {
                bool timedout = ktime_after(ktime_get(), timeout);
 
-               if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
+               if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
                        break;
                if (timedout) {
                        pr_err("%s: Internal clock never stabilised.\n",
                                mmc_hostname(host->mmc));
                        break;
                }
-               udelay(10);
+               usleep_range(10, 20);
        }
 }
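The udelay(10) to usleep_range(10, 20) change relies on esdhc_clock_enable() running in sleepable context; usleep_range() yields the CPU and lets the timer subsystem coalesce wakeups. The bounded-poll shape that replaces the old while (1), condensed:

        timeout = ktime_add_ms(ktime_get(), 20);        /* hard 20 ms bound */
        while (esdhc->vendor_ver > VENDOR_V_22) {       /* bit absent on <= 2.2 */
                if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
                        break;
                if (ktime_after(ktime_get(), timeout)) {
                        pr_err("%s: Internal clock never stabilised.\n",
                               mmc_hostname(host->mmc));
                        break;
                }
                usleep_range(10, 20);
        }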
 
@@ -621,77 +638,97 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
-       int pre_div = 1;
-       int div = 1;
-       int division;
+       unsigned int pre_div = 1, div = 1;
+       unsigned int clock_fixup = 0;
        ktime_t timeout;
-       long fixup = 0;
        u32 temp;
 
-       host->mmc->actual_clock = 0;
-
        if (clock == 0) {
+               host->mmc->actual_clock = 0;
                esdhc_clock_enable(host, false);
                return;
        }
 
-       /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
+       /* Start pre_div at 2 for vendor version < 2.3. */
        if (esdhc->vendor_ver < VENDOR_V_23)
                pre_div = 2;
 
+       /* Cap the requested clock with the platform clock fixup, if any. */
        if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
-               esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
-               fixup = esdhc->clk_fixup->sd_dflt_max_clk;
+           esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
+               clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
        else if (esdhc->clk_fixup)
-               fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
-
-       if (fixup && clock > fixup)
-               clock = fixup;
+               clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
 
-       temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-       temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
-                 ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
-       sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+       if (clock_fixup == 0 || clock < clock_fixup)
+               clock_fixup = clock;
 
-       while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+       /* Calculate pre_div and div. */
+       while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
                pre_div *= 2;
 
-       while (host->max_clk / pre_div / div > clock && div < 16)
+       while (host->max_clk / pre_div / div > clock_fixup && div < 16)
                div++;
 
+       esdhc->div_ratio = pre_div * div;
+
+       /* Limit clock division for the HS400 200 MHz clock, per quirk. */
        if (esdhc->quirk_limited_clk_division &&
            clock == MMC_HS200_MAX_DTR &&
            (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
             host->flags & SDHCI_HS400_TUNING)) {
-               division = pre_div * div;
-               if (division <= 4) {
+               if (esdhc->div_ratio <= 4) {
                        pre_div = 4;
                        div = 1;
-               } else if (division <= 8) {
+               } else if (esdhc->div_ratio <= 8) {
                        pre_div = 4;
                        div = 2;
-               } else if (division <= 12) {
+               } else if (esdhc->div_ratio <= 12) {
                        pre_div = 4;
                        div = 3;
                } else {
                        pr_warn("%s: using unsupported clock division.\n",
                                mmc_hostname(host->mmc));
                }
+               esdhc->div_ratio = pre_div * div;
        }
 
+       host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
+
        dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
-               clock, host->max_clk / pre_div / div);
-       host->mmc->actual_clock = host->max_clk / pre_div / div;
-       esdhc->div_ratio = pre_div * div;
+               clock, host->mmc->actual_clock);
+
+       /* Set clock division into register. */
        pre_div >>= 1;
        div--;
 
+       esdhc_clock_enable(host, false);
+
        temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-       temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
-               | (div << ESDHC_DIVIDER_SHIFT)
-               | (pre_div << ESDHC_PREDIV_SHIFT));
+       temp &= ~ESDHC_CLOCK_MASK;
+       temp |= ((div << ESDHC_DIVIDER_SHIFT) |
+               (pre_div << ESDHC_PREDIV_SHIFT));
        sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
 
+       /*
+        * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
+        * wait for the clock stable bit, which does not exist.
+        */
+       timeout = ktime_add_ms(ktime_get(), 20);
+       while (esdhc->vendor_ver > VENDOR_V_22) {
+               bool timedout = ktime_after(ktime_get(), timeout);
+
+               if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
+                       break;
+               if (timedout) {
+                       pr_err("%s: Internal clock never stabilised.\n",
+                               mmc_hostname(host->mmc));
+                       break;
+               }
+               usleep_range(10, 20);
+       }
+
+       /* Additional setting for HS400. */
        if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
            clock == MMC_HS200_MAX_DTR) {
                temp = sdhci_readl(host, ESDHC_TBCTL);
@@ -711,25 +748,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
                esdhc_clock_enable(host, false);
                esdhc_flush_async_fifo(host);
        }
-
-       /* Wait max 20 ms */
-       timeout = ktime_add_ms(ktime_get(), 20);
-       while (1) {
-               bool timedout = ktime_after(ktime_get(), timeout);
-
-               if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
-                       break;
-               if (timedout) {
-                       pr_err("%s: Internal clock never stabilised.\n",
-                               mmc_hostname(host->mmc));
-                       return;
-               }
-               udelay(10);
-       }
-
-       temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-       temp |= ESDHC_CLOCK_SDCLKEN;
-       sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+       esdhc_clock_enable(host, true);
 }
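To make the divider search concrete, a worked example with illustrative numbers (not tied to any particular SoC): host->max_clk = 200 MHz, requested clock_fixup = 25 MHz, vendor version >= 2.3 so pre_div starts at 1:

        /*
         * pre_div loop: 200 MHz / 1 / 16 = 12.5 MHz, not > 25 MHz,
         *               so pre_div stays 1.
         * div loop:     200/1 .. 200/7 are all > 25 MHz; 200/8 = 25 MHz,
         *               so div = 8.
         *
         * div_ratio    = pre_div * div = 8
         * actual_clock = 200 MHz / 8  = 25 MHz
         *
         * The register encoding then shifts pre_div >>= 1 (-> 0) and
         * decrements div (-> 7) before writing ESDHC_SYSTEM_CONTROL.
         */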
 
 static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
@@ -758,23 +777,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
-       u32 val;
+       u32 val, bus_width = 0;
 
+       /*
+        * Add a delay to make sure all DMA transfers are finished,
+        * as required by the delay-before-data-reset quirk.
+        */
        if (esdhc->quirk_delay_before_data_reset &&
            (mask & SDHCI_RESET_DATA) &&
            (host->flags & SDHCI_REQ_USE_DMA))
                mdelay(5);
 
+       /*
+        * Save the bus width on eSDHC with vendor version 2.2 or
+        * lower, since a data reset will clear it.
+        */
+       if ((mask & SDHCI_RESET_DATA) &&
+           (esdhc->vendor_ver <= VENDOR_V_22)) {
+               val = sdhci_readl(host, ESDHC_PROCTL);
+               bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
+       }
+
        sdhci_reset(host, mask);
 
-       sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
-       sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+       /*
+        * Restore the bus-width setting and interrupt registers on
+        * eSDHC with vendor version 2.2 or lower after a data reset.
+        */
+       if ((mask & SDHCI_RESET_DATA) &&
+           (esdhc->vendor_ver <= VENDOR_V_22)) {
+               val = sdhci_readl(host, ESDHC_PROCTL);
+               val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
+               val |= bus_width;
+               sdhci_writel(host, val, ESDHC_PROCTL);
 
-       if (mask & SDHCI_RESET_ALL) {
+               sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+               sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+       }
+
+       /*
+        * Some bits have to be cleared manually on eSDHC whose spec
+        * version is 3.0 or higher, for a full reset.
+        */
+       if ((mask & SDHCI_RESET_ALL) &&
+           (esdhc->spec_ver >= SDHCI_SPEC_300)) {
                val = sdhci_readl(host, ESDHC_TBCTL);
                val &= ~ESDHC_TB_EN;
                sdhci_writel(host, val, ESDHC_TBCTL);
 
+               /*
+                * Clear eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] for the
+                * unreliable-pulse-detection quirk.
+                */
                if (esdhc->quirk_unreliable_pulse_detection) {
                        val = sdhci_readl(host, ESDHC_DLLCFG1);
                        val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
@@ -854,20 +908,20 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
 }
 
 static struct soc_device_attribute soc_tuning_erratum_type1[] = {
-       { .family = "QorIQ T1023", .revision = "1.0", },
-       { .family = "QorIQ T1040", .revision = "1.0", },
-       { .family = "QorIQ T2080", .revision = "1.0", },
-       { .family = "QorIQ LS1021A", .revision = "1.0", },
+       { .family = "QorIQ T1023", },
+       { .family = "QorIQ T1040", },
+       { .family = "QorIQ T2080", },
+       { .family = "QorIQ LS1021A", },
        { },
 };
 
 static struct soc_device_attribute soc_tuning_erratum_type2[] = {
-       { .family = "QorIQ LS1012A", .revision = "1.0", },
-       { .family = "QorIQ LS1043A", .revision = "1.*", },
-       { .family = "QorIQ LS1046A", .revision = "1.0", },
-       { .family = "QorIQ LS1080A", .revision = "1.0", },
-       { .family = "QorIQ LS2080A", .revision = "1.0", },
-       { .family = "QorIQ LA1575A", .revision = "1.0", },
+       { .family = "QorIQ LS1012A", },
+       { .family = "QorIQ LS1043A", },
+       { .family = "QorIQ LS1046A", },
+       { .family = "QorIQ LS1080A", },
+       { .family = "QorIQ LS2080A", },
+       { .family = "QorIQ LA1575A", },
        { },
 };
 
@@ -888,20 +942,11 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
        esdhc_clock_enable(host, true);
 }
 
-static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
                                    u8 *window_end)
 {
-       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-       struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
-       u8 tbstat_15_8, tbstat_7_0;
        u32 val;
 
-       if (esdhc->quirk_tuning_erratum_type1) {
-               *window_start = 5 * esdhc->div_ratio;
-               *window_end = 3 * esdhc->div_ratio;
-               return;
-       }
-
        /* Write TBCTL[11:8]=4'h8 */
        val = sdhci_readl(host, ESDHC_TBCTL);
        val &= ~(0xf << 8);
@@ -920,20 +965,37 @@ static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
        val = sdhci_readl(host, ESDHC_TBSTAT);
        val = sdhci_readl(host, ESDHC_TBSTAT);
 
+       *window_end = val & 0xff;
+       *window_start = (val >> 8) & 0xff;
+}
+
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+                                   u8 *window_end)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+       u8 start_ptr, end_ptr;
+
+       if (esdhc->quirk_tuning_erratum_type1) {
+               *window_start = 5 * esdhc->div_ratio;
+               *window_end = 3 * esdhc->div_ratio;
+               return;
+       }
+
+       esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
+
        /* Reset data lines by setting ESDHCCTL[RSTD] */
        sdhci_reset(host, SDHCI_RESET_DATA);
        /* Write 32'hFFFF_FFFF to IRQSTAT register */
        sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
 
-       /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
-        * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
+       /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
+        * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
         * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
         * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
         */
-       tbstat_7_0 = val & 0xff;
-       tbstat_15_8 = (val >> 8) & 0xff;
 
-       if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
+       if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
                *window_start = 8 * esdhc->div_ratio;
                *window_end = 4 * esdhc->div_ratio;
        } else {
@@ -1006,6 +1068,19 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
                if (ret)
                        break;
 
+               /* On platforms affected by the type-2 tuning erratum,
+                * tuning may report success even though the eSDHC has
+                * not tuned properly, so check the tuning window.
+                */
+               if (esdhc->quirk_tuning_erratum_type2 &&
+                   !host->tuning_err) {
+                       esdhc_tuning_window_ptr(host, &window_start,
+                                               &window_end);
+                       if (abs(window_start - window_end) >
+                           (4 * esdhc->div_ratio + 2))
+                               host->tuning_err = -EAGAIN;
+               }
+
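The window-width criterion is the same one esdhc_prepare_sw_tuning() uses; factored into a hypothetical helper it reads:

        static bool esdhc_tuning_window_bad(u8 start, u8 end,
                                            unsigned int div_ratio)
        {
                /* a window wider than (4 * div_ratio) + 2 taps is unreliable */
                return abs(start - end) > (4 * div_ratio + 2);
        }
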
                /* If HW tuning fails and triggers erratum,
                 * try workaround.
                 */
@@ -1238,7 +1313,8 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
                 * 1/2 peripheral clock.
                 */
                if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
-                   of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
+                   of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
+                   of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
                        esdhc->peripheral_clock = clk_get_rate(clk) / 2;
                else
                        esdhc->peripheral_clock = clk_get_rate(clk);
index 083e7e0..8820531 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/mmc/mmc.h>
 #include <linux/mmc/slot-gpio.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -85,6 +86,7 @@
 
 /* sdhci-omap controller flags */
 #define SDHCI_OMAP_REQUIRE_IODELAY     BIT(0)
+#define SDHCI_OMAP_SPECIAL_RESET       BIT(1)
 
 struct sdhci_omap_data {
        u32 offset;
@@ -685,7 +687,11 @@ static int sdhci_omap_enable_dma(struct sdhci_host *host)
        struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
 
        reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
-       reg |= CON_DMA_MASTER;
+       reg &= ~CON_DMA_MASTER;
+       /* Switch to DMA slave mode when using external DMA */
+       if (!host->use_external_dma)
+               reg |= CON_DMA_MASTER;
+
        sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
 
        return 0;
@@ -774,15 +780,35 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host,
        sdhci_omap_start_clock(omap_host);
 }
 
+#define MMC_TIMEOUT_US         20000           /* 20000 microseconds */
 static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+       unsigned long limit = MMC_TIMEOUT_US;
+       unsigned long i = 0;
 
        /* Don't reset data lines during tuning operation */
        if (omap_host->is_tuning)
                mask &= ~SDHCI_RESET_DATA;
 
+       if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) {
+               sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
+               while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) &&
+                      (i++ < limit))
+                       udelay(1);
+               i = 0;
+               while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) &&
+                      (i++ < limit))
+                       udelay(1);
+
+               if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)
+                       dev_err(mmc_dev(host->mmc),
+                               "Timeout waiting on controller reset in %s\n",
+                               __func__);
+               return;
+       }
+
        sdhci_reset(host, mask);
 }
 
@@ -823,6 +849,15 @@ static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
        return intmask;
 }
 
+static void sdhci_omap_set_timeout(struct sdhci_host *host,
+                                  struct mmc_command *cmd)
+{
+       if (cmd->opcode == MMC_ERASE)
+               sdhci_set_data_timeout_irq(host, false);
+
+       __sdhci_set_timeout(host, cmd);
+}
+
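The two core helpers used here, sdhci_set_data_timeout_irq() and __sdhci_set_timeout(), are exported by the sdhci core changes later in this pull precisely to allow this kind of per-command override: an erase can run longer than the hardware data-timeout counter covers, so the hardware timeout interrupt is masked for MMC_ERASE and the software timer takes over. The general shape, with a placeholder name:

        static void my_set_timeout(struct sdhci_host *host,
                                   struct mmc_command *cmd)
        {
                if (cmd->opcode == MMC_ERASE)
                        sdhci_set_data_timeout_irq(host, false);

                __sdhci_set_timeout(host, cmd); /* normal core calculation */
        }
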
 static struct sdhci_ops sdhci_omap_ops = {
        .set_clock = sdhci_omap_set_clock,
        .set_power = sdhci_omap_set_power,
@@ -834,6 +869,7 @@ static struct sdhci_ops sdhci_omap_ops = {
        .reset = sdhci_omap_reset,
        .set_uhs_signaling = sdhci_omap_set_uhs_signaling,
        .irq = sdhci_omap_irq,
+       .set_timeout = sdhci_omap_set_timeout,
 };
 
 static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
@@ -883,6 +919,16 @@ static const struct sdhci_omap_data k2g_data = {
        .offset = 0x200,
 };
 
+static const struct sdhci_omap_data am335_data = {
+       .offset = 0x200,
+       .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
+static const struct sdhci_omap_data am437_data = {
+       .offset = 0x200,
+       .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
 static const struct sdhci_omap_data dra7_data = {
        .offset = 0x200,
        .flags  = SDHCI_OMAP_REQUIRE_IODELAY,
@@ -891,6 +937,8 @@ static const struct sdhci_omap_data dra7_data = {
 static const struct of_device_id omap_sdhci_match[] = {
        { .compatible = "ti,dra7-sdhci", .data = &dra7_data },
        { .compatible = "ti,k2g-sdhci", .data = &k2g_data },
+       { .compatible = "ti,am335-sdhci", .data = &am335_data },
+       { .compatible = "ti,am437-sdhci", .data = &am437_data },
        {},
 };
 MODULE_DEVICE_TABLE(of, omap_sdhci_match);
@@ -1037,6 +1085,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        const struct of_device_id *match;
        struct sdhci_omap_data *data;
        const struct soc_device_attribute *soc;
+       struct resource *regs;
 
        match = of_match_device(omap_sdhci_match, dev);
        if (!match)
@@ -1049,6 +1098,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        }
        offset = data->offset;
 
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!regs)
+               return -ENXIO;
+
        host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
                                sizeof(*omap_host));
        if (IS_ERR(host)) {
@@ -1065,6 +1118,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        omap_host->timing = MMC_TIMING_LEGACY;
        omap_host->flags = data->flags;
        host->ioaddr += offset;
+       host->mapbase = regs->start + offset;
 
        mmc = host->mmc;
        sdhci_get_of_property(pdev);
@@ -1134,6 +1188,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
        host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
 
+       /* Switch to external DMA only if the "dmas" property is present */
+       if (of_find_property(dev->of_node, "dmas", NULL))
+               sdhci_switch_external_dma(host, true);
+
        ret = sdhci_setup_host(host);
        if (ret)
                goto err_put_sync;
index 5091e2c..525de24 100644 (file)
@@ -1991,12 +1991,12 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
 
        if (slot->cd_idx >= 0) {
                ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
-                                          slot->cd_override_level, 0, NULL);
+                                          slot->cd_override_level, 0);
                if (ret && ret != -EPROBE_DEFER)
                        ret = mmc_gpiod_request_cd(host->mmc, NULL,
                                                   slot->cd_idx,
                                                   slot->cd_override_level,
-                                                  0, NULL);
+                                                  0);
                if (ret == -EPROBE_DEFER)
                        goto remove;
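Every mmc_gpiod_request_cd()/mmc_gpiod_request_ro() caller in this pull (sdhci-esdhc-imx above, sdhci-sirf and sdhci-spear below) drops the final argument, a bool *gpio_invert out-parameter in the old signature; polarity now comes solely from the GPIO descriptor. The updated call shape:

        /* con_id "cd", index 0, no active-level override, no debounce */
        ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
        if (ret == -EPROBE_DEFER)
                return ret;     /* GPIO provider not ready yet */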
 
index 51e096f..64200c7 100644 (file)
@@ -117,7 +117,6 @@ struct sdhci_s3c {
        struct s3c_sdhci_platdata *pdata;
        int                     cur_clk;
        int                     ext_cd_irq;
-       int                     ext_cd_gpio;
 
        struct clk              *clk_io;
        struct clk              *clk_bus[MAX_BUS_CLK];
@@ -481,7 +480,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct sdhci_host *host;
        struct sdhci_s3c *sc;
-       struct resource *res;
        int ret, irq, ptr, clks;
 
        if (!pdev->dev.platform_data && !pdev->dev.of_node) {
@@ -512,7 +510,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
                        goto err_pdata_io_clk;
        } else {
                memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
-               sc->ext_cd_gpio = -1; /* invalid gpio number */
        }
 
        drv_data = sdhci_s3c_get_driver_data(pdev);
@@ -555,8 +552,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
                goto err_no_busclks;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+       host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->ioaddr)) {
                ret = PTR_ERR(host->ioaddr);
                goto err_req_regs;
index e431432..f4b05dd 100644 (file)
@@ -194,7 +194,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
         * We must request the IRQ after sdhci_add_host(), as the tasklet only
         * gets setup in sdhci_add_host() and we oops.
         */
-       ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+       ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
        if (ret == -EPROBE_DEFER)
                goto err_request_cd;
        if (!ret)
index 916b5b0..b4b6308 100644 (file)
@@ -43,7 +43,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = {
 static int sdhci_probe(struct platform_device *pdev)
 {
        struct sdhci_host *host;
-       struct resource *iomem;
        struct spear_sdhci *sdhci;
        struct device *dev;
        int ret;
@@ -56,8 +55,7 @@ static int sdhci_probe(struct platform_device *pdev)
                goto err;
        }
 
-       iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+       host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->ioaddr)) {
                ret = PTR_ERR(host->ioaddr);
                dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret);
@@ -98,7 +96,7 @@ static int sdhci_probe(struct platform_device *pdev)
         * It is optional to use GPIOs for sdhci card detection. If we
         * find a descriptor using slot GPIO, we use it.
         */
-       ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+       ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
        if (ret == -EPROBE_DEFER)
                goto disable_clk;
 
index 7bc9505..403ac44 100644 (file)
@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
                        misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
                if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
                        misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
-               if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+               if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
                        clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
        }
 
index 1b1c26d..63db844 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/ktime.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
@@ -992,7 +993,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
 }
 
-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
 {
        if (enable)
                host->ier |= SDHCI_INT_DATA_TIMEOUT;
@@ -1001,42 +1002,36 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
 }
+EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
 
-static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
 {
-       u8 count;
-
-       if (host->ops->set_timeout) {
-               host->ops->set_timeout(host, cmd);
-       } else {
-               bool too_big = false;
-
-               count = sdhci_calc_timeout(host, cmd, &too_big);
-
-               if (too_big &&
-                   host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
-                       sdhci_calc_sw_timeout(host, cmd);
-                       sdhci_set_data_timeout_irq(host, false);
-               } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
-                       sdhci_set_data_timeout_irq(host, true);
-               }
-
-               sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+       bool too_big = false;
+       u8 count = sdhci_calc_timeout(host, cmd, &too_big);
+
+       if (too_big &&
+           host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
+               sdhci_calc_sw_timeout(host, cmd);
+               sdhci_set_data_timeout_irq(host, false);
+       } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
+               sdhci_set_data_timeout_irq(host, true);
        }
+
+       sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
 }
+EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
 
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
 {
-       struct mmc_data *data = cmd->data;
-
-       host->data_timeout = 0;
-
-       if (sdhci_data_line_cmd(cmd))
-               sdhci_set_timeout(host, cmd);
-
-       if (!data)
-               return;
+       if (host->ops->set_timeout)
+               host->ops->set_timeout(host, cmd);
+       else
+               __sdhci_set_timeout(host, cmd);
+}
 
+static void sdhci_initialize_data(struct sdhci_host *host,
+                                 struct mmc_data *data)
+{
        WARN_ON(host->data);
 
        /* Sanity checks */
@@ -1047,6 +1042,34 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
        host->data = data;
        host->data_early = 0;
        host->data->bytes_xfered = 0;
+}
+
+static inline void sdhci_set_block_info(struct sdhci_host *host,
+                                       struct mmc_data *data)
+{
+       /* Set the DMA boundary value and block size */
+       sdhci_writew(host,
+                    SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
+                    SDHCI_BLOCK_SIZE);
+       /*
+        * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
+        * can be supported, in that case 16-bit block count register must be 0.
+        */
+       if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
+           (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
+               if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
+                       sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
+               sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+       } else {
+               sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+       }
+}
+
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+{
+       struct mmc_data *data = cmd->data;
+
+       sdhci_initialize_data(host, data);
 
        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                struct scatterlist *sg;
@@ -1133,24 +1156,192 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 
        sdhci_set_transfer_irqs(host);
 
-       /* Set the DMA boundary value and block size */
-       sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
-                    SDHCI_BLOCK_SIZE);
+       sdhci_set_block_info(host, data);
+}
 
-       /*
-        * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
-        * can be supported, in that case 16-bit block count register must be 0.
-        */
-       if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
-           (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
-               if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
-                       sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
-               sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+
+static int sdhci_external_dma_init(struct sdhci_host *host)
+{
+       int ret = 0;
+       struct mmc_host *mmc = host->mmc;
+
+       host->tx_chan = dma_request_chan(mmc->parent, "tx");
+       if (IS_ERR(host->tx_chan)) {
+               ret = PTR_ERR(host->tx_chan);
+               if (ret != -EPROBE_DEFER)
+                       pr_warn("Failed to request TX DMA channel.\n");
+               host->tx_chan = NULL;
+               return ret;
+       }
+
+       host->rx_chan = dma_request_chan(mmc->parent, "rx");
+       if (IS_ERR(host->rx_chan)) {
+               if (host->tx_chan) {
+                       dma_release_channel(host->tx_chan);
+                       host->tx_chan = NULL;
+               }
+
+               ret = PTR_ERR(host->rx_chan);
+               if (ret != -EPROBE_DEFER)
+                       pr_warn("Failed to request RX DMA channel.\n");
+               host->rx_chan = NULL;
+       }
+
+       return ret;
+}
+
+static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+                                                  struct mmc_data *data)
+{
+       return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
+static int sdhci_external_dma_setup(struct sdhci_host *host,
+                                   struct mmc_command *cmd)
+{
+       int ret, i;
+       enum dma_transfer_direction dir;
+       struct dma_async_tx_descriptor *desc;
+       struct mmc_data *data = cmd->data;
+       struct dma_chan *chan;
+       struct dma_slave_config cfg;
+       dma_cookie_t cookie;
+       int sg_cnt;
+
+       if (!host->mapbase)
+               return -EINVAL;
+
+       cfg.src_addr = host->mapbase + SDHCI_BUFFER;
+       cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
+       cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       cfg.src_maxburst = data->blksz / 4;
+       cfg.dst_maxburst = data->blksz / 4;
+
+       /* Sanity check: all the SG entries must be aligned to the block size. */
+       for (i = 0; i < data->sg_len; i++) {
+               if ((data->sg + i)->length % data->blksz)
+                       return -EINVAL;
+       }
+
+       chan = sdhci_external_dma_channel(host, data);
+
+       ret = dmaengine_slave_config(chan, &cfg);
+       if (ret)
+               return ret;
+
+       sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
+       if (sg_cnt <= 0)
+               return -EINVAL;
+
+       dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+       desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
+                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc)
+               return -EINVAL;
+
+       desc->callback = NULL;
+       desc->callback_param = NULL;
+
+       cookie = dmaengine_submit(desc);
+       if (dma_submit_error(cookie))
+               ret = cookie;
+
+       return ret;
+}
+
+static void sdhci_external_dma_release(struct sdhci_host *host)
+{
+       if (host->tx_chan) {
+               dma_release_channel(host->tx_chan);
+               host->tx_chan = NULL;
+       }
+
+       if (host->rx_chan) {
+               dma_release_channel(host->rx_chan);
+               host->rx_chan = NULL;
+       }
+
+       sdhci_switch_external_dma(host, false);
+}
+
+static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
+                                             struct mmc_command *cmd)
+{
+       struct mmc_data *data = cmd->data;
+
+       sdhci_initialize_data(host, data);
+
+       host->flags |= SDHCI_REQ_USE_DMA;
+       sdhci_set_transfer_irqs(host);
+
+       sdhci_set_block_info(host, data);
+}
+
+static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+                                           struct mmc_command *cmd)
+{
+       if (!sdhci_external_dma_setup(host, cmd)) {
+               __sdhci_external_dma_prepare_data(host, cmd);
        } else {
-               sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+               sdhci_external_dma_release(host);
+               pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
+                      mmc_hostname(host->mmc));
+               sdhci_prepare_data(host, cmd);
        }
 }
 
+static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+                                           struct mmc_command *cmd)
+{
+       struct dma_chan *chan;
+
+       if (!cmd->data)
+               return;
+
+       chan = sdhci_external_dma_channel(host, cmd->data);
+       if (chan)
+               dma_async_issue_pending(chan);
+}
+
+#else
+
+static inline int sdhci_external_dma_init(struct sdhci_host *host)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void sdhci_external_dma_release(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+                                                  struct mmc_command *cmd)
+{
+       /* This should never happen */
+       WARN_ON_ONCE(1);
+}
+
+static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+                                                  struct mmc_command *cmd)
+{
+}
+
+static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+                                                         struct mmc_data *data)
+{
+       return NULL;
+}
+
+#endif
+
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
+{
+       host->use_external_dma = en;
+}
+EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
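
A platform glue driver opts in to this external-DMA path before registering the host. The following is a minimal sketch under stated assumptions: foo_sdhci_probe and foo_pdata are hypothetical; only sdhci_switch_external_dma(), host->mapbase and the "tx"/"rx" dma-names convention come from the code above.

#include <linux/platform_device.h>
#include "sdhci-pltfm.h"

/* Hypothetical glue driver; .ops/.quirks elided for the sketch. */
static const struct sdhci_pltfm_data foo_pdata;

static int foo_sdhci_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct resource *res;
	int ret;

	host = sdhci_pltfm_init(pdev, &foo_pdata, 0);
	if (IS_ERR(host))
		return PTR_ERR(host);

	/* Physical base: becomes the DMA slave address of SDHCI_BUFFER */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto err_free;
	}
	host->mapbase = res->start;

	/* Have sdhci_setup_host() request the "tx"/"rx" channels */
	sdhci_switch_external_dma(host, true);

	ret = sdhci_add_host(host);
	if (ret)
		goto err_free;

	return 0;

err_free:
	sdhci_pltfm_free(pdev);
	return ret;
}
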
+
 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
                                    struct mmc_request *mrq)
 {
@@ -1245,22 +1436,10 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
                 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
 }
 
-static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
 {
        int i;
 
-       if (host->cmd && host->cmd->mrq == mrq)
-               host->cmd = NULL;
-
-       if (host->data_cmd && host->data_cmd->mrq == mrq)
-               host->data_cmd = NULL;
-
-       if (host->data && host->data->mrq == mrq)
-               host->data = NULL;
-
-       if (sdhci_needs_reset(host, mrq))
-               host->pending_reset = true;
-
        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                if (host->mrqs_done[i] == mrq) {
                        WARN_ON(1);
@@ -1276,6 +1455,23 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
        }
 
        WARN_ON(i >= SDHCI_MAX_MRQS);
+}
+
+static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+{
+       if (host->cmd && host->cmd->mrq == mrq)
+               host->cmd = NULL;
+
+       if (host->data_cmd && host->data_cmd->mrq == mrq)
+               host->data_cmd = NULL;
+
+       if (host->data && host->data->mrq == mrq)
+               host->data = NULL;
+
+       if (sdhci_needs_reset(host, mrq))
+               host->pending_reset = true;
+
+       sdhci_set_mrq_done(host, mrq);
 
        sdhci_del_timer(host, mrq);
 
@@ -1326,12 +1522,12 @@ static void sdhci_finish_data(struct sdhci_host *host)
 
        /*
         * Need to send CMD12 if -
-        * a) open-ended multiblock transfer (no CMD23)
+        * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
         * b) error in multiblock transfer
         */
        if (data->stop &&
-           (data->error ||
-            !data->mrq->sbc)) {
+           ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
+            data->error)) {
                /*
                 * 'cap_cmd_during_tfr' request must not use the command line
                 * after mmc_command_done() has been called. It is upper layer's
@@ -1390,12 +1586,19 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
        }
 
        host->cmd = cmd;
+       host->data_timeout = 0;
        if (sdhci_data_line_cmd(cmd)) {
                WARN_ON(host->data_cmd);
                host->data_cmd = cmd;
+               sdhci_set_timeout(host, cmd);
        }
 
-       sdhci_prepare_data(host, cmd);
+       if (cmd->data) {
+               if (host->use_external_dma)
+                       sdhci_external_dma_prepare_data(host, cmd);
+               else
+                       sdhci_prepare_data(host, cmd);
+       }
 
        sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
 
@@ -1437,6 +1640,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
                timeout += 10 * HZ;
        sdhci_mod_timer(host, cmd->mrq, timeout);
 
+       if (host->use_external_dma)
+               sdhci_external_dma_pre_transfer(host, cmd);
+
        sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
 }
 EXPORT_SYMBOL_GPL(sdhci_send_command);
@@ -1825,17 +2031,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
        sdhci_led_activate(host);
 
-       /*
-        * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
-        * requests if Auto-CMD12 is enabled.
-        */
-       if (sdhci_auto_cmd12(host, mrq)) {
-               if (mrq->stop) {
-                       mrq->data->stop = NULL;
-                       mrq->stop = NULL;
-               }
-       }
-
        if (!present || host->flags & SDHCI_DEVICE_DEAD) {
                mrq->cmd->error = -ENOMEDIUM;
                sdhci_finish_mrq(host, mrq);
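
With the hunk above, the open-ended/auto-CMD12 special case moves out of sdhci_request() and into the sdhci_finish_data() test shown earlier. Modeled outside the kernel (plain C; the parameter names mirror mrq->sbc and the sdhci_auto_cmd12() helper above), the decision reduces to:

#include <stdbool.h>

/* Illustrative model, not kernel code: issue a manual CMD12 only for an
 * open-ended multiblock transfer (no CMD23 "sbc" and no auto CMD12 in
 * hardware), or whenever the data phase ended in error. */
static bool need_manual_cmd12(bool has_stop, bool has_sbc,
			      bool auto_cmd12, bool data_error)
{
	return has_stop && ((!has_sbc && !auto_cmd12) || data_error);
}
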
@@ -2661,6 +2856,17 @@ static bool sdhci_request_done(struct sdhci_host *host)
        if (host->flags & SDHCI_REQ_USE_DMA) {
                struct mmc_data *data = mrq->data;
 
+               if (host->use_external_dma && data &&
+                   (mrq->cmd->error || data->error)) {
+                       struct dma_chan *chan = sdhci_external_dma_channel(host, data);
+
+                       host->mrqs_done[i] = NULL;
+                       spin_unlock_irqrestore(&host->lock, flags);
+                       dmaengine_terminate_sync(chan);
+                       spin_lock_irqsave(&host->lock, flags);
+                       sdhci_set_mrq_done(host, mrq);
+               }
+
                if (data && data->host_cookie == COOKIE_MAPPED) {
                        if (host->bounce_buffer) {
                                /*
@@ -3796,6 +4002,21 @@ int sdhci_setup_host(struct sdhci_host *host)
        if (sdhci_can_64bit_dma(host))
                host->flags |= SDHCI_USE_64_BIT_DMA;
 
+       if (host->use_external_dma) {
+               ret = sdhci_external_dma_init(host);
+               if (ret == -EPROBE_DEFER)
+                       goto unreg;
+               /*
+                * Fall back to the DMA/PIO integrated in the standard SDHCI
+                * instead of an external DMA device.
+                */
+               else if (ret)
+                       sdhci_switch_external_dma(host, false);
+               /* Disable internal DMA sources */
+               else
+                       host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
+       }
+
        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                if (host->ops->set_dma_mask)
                        ret = host->ops->set_dma_mask(host);
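
The three-way outcome above (defer, fall back, or disable the internal engines) follows the usual dmaengine probing pattern: only -EPROBE_DEFER is worth failing the probe for, anything else demotes the host to its built-in DMA/PIO. A generic sketch of the same pattern, with placeholder foo_* names and channel name:

#include <linux/device.h>
#include <linux/dmaengine.h>

/* Placeholder private struct for the sketch. */
struct foo_priv { struct dma_chan *chan; };

static int foo_request_dma(struct device *dev, struct foo_priv *priv)
{
	priv->chan = dma_request_chan(dev, "rx-tx");
	if (IS_ERR(priv->chan)) {
		int ret = PTR_ERR(priv->chan);

		priv->chan = NULL;
		if (ret == -EPROBE_DEFER)
			return ret;	/* provider not ready: retry probe later */
		dev_warn(dev, "no DMA channel, falling back to PIO\n");
	}
	return 0;	/* PIO fallback keeps the device usable */
}
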
@@ -3822,15 +4043,13 @@ int sdhci_setup_host(struct sdhci_host *host)
                dma_addr_t dma;
                void *buf;
 
-               if (host->flags & SDHCI_USE_64_BIT_DMA) {
-                       host->adma_table_sz = host->adma_table_cnt *
-                                             SDHCI_ADMA2_64_DESC_SZ(host);
-                       host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
-               } else {
-                       host->adma_table_sz = host->adma_table_cnt *
-                                             SDHCI_ADMA2_32_DESC_SZ;
-                       host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
-               }
+               if (!(host->flags & SDHCI_USE_64_BIT_DMA))
+                       host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
+               else if (!host->alloc_desc_sz)
+                       host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
+
+               host->desc_sz = host->alloc_desc_sz;
+               host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
 
                host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
                /*
@@ -3913,11 +4132,13 @@ int sdhci_setup_host(struct sdhci_host *host)
        if (host->ops->get_min_clock)
                mmc->f_min = host->ops->get_min_clock(host);
        else if (host->version >= SDHCI_SPEC_300) {
-               if (host->clk_mul) {
-                       mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+               if (host->clk_mul)
                        max_clk = host->max_clk * host->clk_mul;
-               } else
-                       mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+               /*
+                * Divided Clock Mode minimum clock rate is always less than
+                * Programmable Clock Mode minimum clock rate.
+                */
+               mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
        } else
                mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
 
@@ -4276,6 +4497,10 @@ void sdhci_cleanup_host(struct sdhci_host *host)
                dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
                                  host->adma_table_sz, host->align_buffer,
                                  host->align_addr);
+
+       if (host->use_external_dma)
+               sdhci_external_dma_release(host);
+
        host->adma_table = NULL;
        host->align_buffer = NULL;
 }
@@ -4321,6 +4546,7 @@ int __sdhci_add_host(struct sdhci_host *host)
 
        pr_info("%s: SDHCI controller on %s [%s] using %s\n",
                mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+               host->use_external_dma ? "External DMA" :
                (host->flags & SDHCI_USE_ADMA) ?
                (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
                (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
@@ -4409,6 +4635,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
                                  host->adma_table_sz, host->align_buffer,
                                  host->align_addr);
 
+       if (host->use_external_dma)
+               sdhci_external_dma_release(host);
+
        host->adma_table = NULL;
        host->align_buffer = NULL;
 }
index fe83ece..a6a3ddc 100644 (file)
@@ -487,6 +487,7 @@ struct sdhci_host {
 
        int irq;                /* Device IRQ */
        void __iomem *ioaddr;   /* Mapped address */
+       phys_addr_t mapbase;    /* physical address base */
        char *bounce_buffer;    /* For packing SDMA reads/writes */
        dma_addr_t bounce_addr;
        unsigned int bounce_buffer_size;
@@ -535,6 +536,7 @@ struct sdhci_host {
        bool pending_reset;     /* Cmd/data reset is pending */
        bool irq_wake_enabled;  /* IRQ wakeup is enabled */
        bool v4_mode;           /* Host Version 4 Enable */
+       bool use_external_dma;  /* Host selects to use external DMA */
 
        struct mmc_request *mrqs_done[SDHCI_MAX_MRQS];  /* Requests done */
        struct mmc_command *cmd;        /* Current command */
@@ -556,7 +558,8 @@ struct sdhci_host {
        dma_addr_t adma_addr;   /* Mapped ADMA descr. table */
        dma_addr_t align_addr;  /* Mapped bounce buffer */
 
-       unsigned int desc_sz;   /* ADMA descriptor size */
+       unsigned int desc_sz;   /* ADMA current descriptor size */
+       unsigned int alloc_desc_sz;     /* ADMA descr. max size host supports */
 
        struct workqueue_struct *complete_wq;   /* Request completion wq */
        struct work_struct      complete_work;  /* Request completion work */
@@ -564,6 +567,11 @@ struct sdhci_host {
        struct timer_list timer;        /* Timer for timeouts */
        struct timer_list data_timer;   /* Timer for data timeouts */
 
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+       struct dma_chan *rx_chan;
+       struct dma_chan *tx_chan;
+#endif
+
        u32 caps;               /* CAPABILITY_0 */
        u32 caps1;              /* CAPABILITY_1 */
        bool read_caps;         /* Capability flags have been read */
@@ -795,5 +803,8 @@ void sdhci_end_tuning(struct sdhci_host *host);
 void sdhci_reset_tuning(struct sdhci_host *host);
 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en);
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
 
 #endif /* __SDHCI_HW_H */
index b8e897e..3afea58 100644 (file)
@@ -240,6 +240,35 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
        writeb(val, host->ioaddr + reg);
 }
 
+static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       int err = sdhci_execute_tuning(mmc, opcode);
+
+       if (err)
+               return err;
+       /*
+        * Tuning data remains in the buffer after tuning.
+        * Do a command and data reset to get rid of it.
+        */
+       sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+       return 0;
+}
+
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+       int cmd_error = 0;
+       int data_error = 0;
+
+       if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+               return intmask;
+
+       cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+       return 0;
+}
+
 static struct sdhci_ops sdhci_am654_ops = {
        .get_max_clock = sdhci_pltfm_clk_get_max_clock,
        .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -248,13 +277,13 @@ static struct sdhci_ops sdhci_am654_ops = {
        .set_power = sdhci_am654_set_power,
        .set_clock = sdhci_am654_set_clock,
        .write_b = sdhci_am654_write_b,
+       .irq = sdhci_am654_cqhci_irq,
        .reset = sdhci_reset,
 };
 
 static const struct sdhci_pltfm_data sdhci_am654_pdata = {
        .ops = &sdhci_am654_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -263,19 +292,6 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
        .flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
 };
 
-static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
-{
-       int cmd_error = 0;
-       int data_error = 0;
-
-       if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
-               return intmask;
-
-       cqhci_irq(host->mmc, intmask, cmd_error, data_error);
-
-       return 0;
-}
-
 static struct sdhci_ops sdhci_j721e_8bit_ops = {
        .get_max_clock = sdhci_pltfm_clk_get_max_clock,
        .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -290,8 +306,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
 
 static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
        .ops = &sdhci_j721e_8bit_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -314,8 +329,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
 
 static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
        .ops = &sdhci_j721e_4bit_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -491,7 +505,6 @@ static int sdhci_am654_probe(struct platform_device *pdev)
        struct sdhci_am654_data *sdhci_am654;
        const struct of_device_id *match;
        struct sdhci_host *host;
-       struct resource *res;
        struct clk *clk_xin;
        struct device *dev = &pdev->dev;
        void __iomem *base;
@@ -524,8 +537,7 @@ static int sdhci_am654_probe(struct platform_device *pdev)
                goto pm_runtime_disable;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       base = devm_ioremap_resource(dev, res);
+       base = devm_platform_ioremap_resource(pdev, 1);
        if (IS_ERR(base)) {
                ret = PTR_ERR(base);
                goto pm_runtime_put;
@@ -549,6 +561,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
                goto pm_runtime_put;
        }
 
+       host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+
        ret = sdhci_am654_init(host);
        if (ret)
                goto pm_runtime_put;
index fa0dfc6..4625cc0 100644 (file)
@@ -89,7 +89,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
 {
        struct sdhci_host *host;
        struct device *dev = &pdev->dev;
-       struct resource *res;
        int irq, ctrl = 0, ret = 0;
        struct f_sdhost_priv *priv;
        u32 reg = 0;
@@ -123,8 +122,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
        host->ops = &sdhci_f_sdh30_ops;
        host->irq = irq;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+       host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->ioaddr)) {
                ret = PTR_ERR(host->ioaddr);
                goto err;
index 98c575d..7e1fd55 100644 (file)
@@ -432,8 +432,12 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
                host->chan_rx = sh_mmcif_request_dma_pdata(host,
                                                        pdata->slave_id_rx);
        } else {
-               host->chan_tx = dma_request_slave_channel(dev, "tx");
-               host->chan_rx = dma_request_slave_channel(dev, "rx");
+               host->chan_tx = dma_request_chan(dev, "tx");
+               if (IS_ERR(host->chan_tx))
+                       host->chan_tx = NULL;
+               host->chan_rx = dma_request_chan(dev, "rx");
+               if (IS_ERR(host->chan_rx))
+                       host->chan_rx = NULL;
        }
        dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
                host->chan_rx);
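
This conversion (repeated for usdhi6 further down) changes the failure convention: the removed dma_request_slave_channel() returned NULL on failure, while dma_request_chan() returns an ERR_PTR() and never NULL. Drivers that treat the channel as optional therefore translate errors back to NULL, as in this sketch (foo_optional_chan is a placeholder name):

#include <linux/dmaengine.h>

/* Optional-channel idiom: any failure simply means "use PIO". */
static struct dma_chan *foo_optional_chan(struct device *dev,
					  const char *name)
{
	struct dma_chan *chan = dma_request_chan(dev, name);

	return IS_ERR(chan) ? NULL : chan;
}
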
@@ -1388,7 +1392,6 @@ static int sh_mmcif_probe(struct platform_device *pdev)
        struct sh_mmcif_host *host;
        struct device *dev = &pdev->dev;
        struct sh_mmcif_plat_data *pd = dev->platform_data;
-       struct resource *res;
        void __iomem *reg;
        const char *name;
 
@@ -1397,8 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
        if (irq[0] < 0)
                return -ENXIO;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       reg = devm_ioremap_resource(dev, res);
+       reg = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(reg))
                return PTR_ERR(reg);
 
index d577a6b..f87d796 100644 (file)
@@ -1273,8 +1273,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
        if (ret)
                return ret;
 
-       host->reg_base = devm_ioremap_resource(&pdev->dev,
-                             platform_get_resource(pdev, IORESOURCE_MEM, 0));
+       host->reg_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->reg_base))
                return PTR_ERR(host->reg_base);
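
The same two-line replacement recurs across this series (sdhci_am654, f_sdh30, sh_mmcif above; tmio below). devm_platform_ioremap_resource() is the fused form of the pair it replaces; its behavior amounts to this sketch (not a verbatim copy of drivers/base/platform.c):

#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch of what devm_platform_ioremap_resource() does internally. */
static void __iomem *ioremap_resource_sketch(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
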
 
index c4a1d49..1e424bc 100644 (file)
@@ -1109,12 +1109,10 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
 {
        struct tmio_mmc_host *host;
        struct mmc_host *mmc;
-       struct resource *res;
        void __iomem *ctl;
        int ret;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ctl = devm_ioremap_resource(&pdev->dev, res);
+       ctl = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctl))
                return ERR_CAST(ctl);
 
@@ -1181,7 +1179,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
         * Look for a card detect GPIO, if it fails with anything
         * else than a probe deferral, just live without it.
         */
-       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
        if (ret == -EPROBE_DEFER)
                return ret;
 
index 0c72ec5..a1683c4 100644 (file)
@@ -59,7 +59,6 @@
 struct uniphier_sd_priv {
        struct tmio_mmc_data tmio_data;
        struct pinctrl *pinctrl;
-       struct pinctrl_state *pinstate_default;
        struct pinctrl_state *pinstate_uhs;
        struct clk *clk;
        struct reset_control *rst;
@@ -500,13 +499,12 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
 {
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
-       struct pinctrl_state *pinstate;
+       struct pinctrl_state *pinstate = NULL;
        u32 val, tmp;
 
        switch (ios->signal_voltage) {
        case MMC_SIGNAL_VOLTAGE_330:
                val = UNIPHIER_SD_VOLT_330;
-               pinstate = priv->pinstate_default;
                break;
        case MMC_SIGNAL_VOLTAGE_180:
                val = UNIPHIER_SD_VOLT_180;
@@ -521,7 +519,10 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
        tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
        writel(tmp, host->ctl + UNIPHIER_SD_VOLT);
 
-       pinctrl_select_state(priv->pinctrl, pinstate);
+       if (pinstate)
+               pinctrl_select_state(priv->pinctrl, pinstate);
+       else
+               pinctrl_select_default_state(mmc_dev(mmc));
 
        return 0;
 }
@@ -533,11 +534,6 @@ static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
        if (IS_ERR(priv->pinctrl))
                return PTR_ERR(priv->pinctrl);
 
-       priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl,
-                                                     PINCTRL_STATE_DEFAULT);
-       if (IS_ERR(priv->pinstate_default))
-               return PTR_ERR(priv->pinstate_default);
-
        priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
        if (IS_ERR(priv->pinstate_uhs))
                return PTR_ERR(priv->pinstate_uhs);
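
Both pinctrl conversions in this series (here and in usdhi6 below) drop the hand-rolled PINCTRL_STATE_DEFAULT lookup: pinctrl_select_default_state() reselects the default state that the device core already looked up at probe time. The resulting voltage-switch shape, sketched with placeholder foo_* names:

#include <linux/pinctrl/consumer.h>

struct foo_host {
	struct device *dev;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pinstate_uhs;
};

/* Select the UHS pin state for 1.8 V signalling, the default otherwise. */
static int foo_set_signal_voltage(struct foo_host *host, bool uhs)
{
	if (uhs)
		return pinctrl_select_state(host->pinctrl,
					    host->pinstate_uhs);
	return pinctrl_select_default_state(host->dev);
}
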
index b11ac23..9a0b1e4 100644 (file)
@@ -199,7 +199,6 @@ struct usdhi6_host {
 
        /* Pin control */
        struct pinctrl *pinctrl;
-       struct pinctrl_state *pins_default;
        struct pinctrl_state *pins_uhs;
 };
 
@@ -677,12 +676,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
        };
        int ret;
 
-       host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+       host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
        dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
                host->chan_tx);
 
-       if (!host->chan_tx)
+       if (IS_ERR(host->chan_tx)) {
+               host->chan_tx = NULL;
                return;
+       }
 
        cfg.direction = DMA_MEM_TO_DEV;
        cfg.dst_addr = start + USDHI6_SD_BUF0;
@@ -692,12 +693,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
        if (ret < 0)
                goto e_release_tx;
 
-       host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
+       host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
        dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
                host->chan_rx);
 
-       if (!host->chan_rx)
+       if (IS_ERR(host->chan_rx)) {
+               host->chan_rx = NULL;
                goto e_release_tx;
+       }
 
        cfg.direction = DMA_DEV_TO_MEM;
        cfg.src_addr = cfg.dst_addr;
@@ -1162,8 +1165,7 @@ static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
                                            host->pins_uhs);
 
        default:
-               return pinctrl_select_state(host->pinctrl,
-                                           host->pins_default);
+               return pinctrl_select_default_state(mmc_dev(host->mmc));
        }
 }
 
@@ -1770,17 +1772,6 @@ static int usdhi6_probe(struct platform_device *pdev)
        }
 
        host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
-       if (!IS_ERR(host->pins_uhs)) {
-               host->pins_default = pinctrl_lookup_state(host->pinctrl,
-                                                         PINCTRL_STATE_DEFAULT);
-
-               if (IS_ERR(host->pins_default)) {
-                       dev_err(dev,
-                               "UHS pinctrl requires a default pin state.\n");
-                       ret = PTR_ERR(host->pins_default);
-                       goto e_free_mmc;
-               }
-       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->base = devm_ioremap_resource(dev, res);
index 2e57122..2f5c287 100644 (file)
@@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
  */
 static void slcan_write_wakeup(struct tty_struct *tty)
 {
-       struct slcan *sl = tty->disc_data;
+       struct slcan *sl;
+
+       rcu_read_lock();
+       sl = rcu_dereference(tty->disc_data);
+       if (!sl)
+               goto out;
 
        schedule_work(&sl->tx_work);
+out:
+       rcu_read_unlock();
 }
 
 /* Send a can_frame to a TTY queue. */
@@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
                return;
 
        spin_lock_bh(&sl->lock);
-       tty->disc_data = NULL;
+       rcu_assign_pointer(tty->disc_data, NULL);
        sl->tty = NULL;
        spin_unlock_bh(&sl->lock);
 
+       synchronize_rcu();
        flush_work(&sl->tx_work);
 
        /* Flush network side */
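
The fix pairs an RCU read side in the wakeup callback with publish-then-wait teardown: close() unpublishes disc_data, synchronize_rcu() waits out any reader still inside its critical section, and only then is the work flushed so it cannot be re-queued afterwards. The generic shape of the pattern, with placeholder foo_* names:

#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct foo { struct work_struct tx_work; };
static struct foo __rcu *foo_ptr;

/* Reader: may run concurrently with teardown. */
static void foo_wakeup(void)
{
	struct foo *f;

	rcu_read_lock();
	f = rcu_dereference(foo_ptr);
	if (f)
		schedule_work(&f->tx_work);
	rcu_read_unlock();
}

/* Writer: unpublish, wait for readers, then flush. */
static void foo_close(struct foo *f)
{
	RCU_INIT_POINTER(foo_ptr, NULL);
	synchronize_rcu();		/* no reader can still see f ... */
	flush_work(&f->tx_work);	/* ... nor schedule work after this */
}
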
index 120fa05..0a8624b 100644 (file)
@@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
                                  DMA_END_ADDR);
 
        /* Initialize Tx NAPI */
-       netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
-                      NAPI_POLL_WEIGHT);
+       netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
+                         NAPI_POLL_WEIGHT);
 }
 
 /* Initialize a RDMA ring */
index 58f89f6..97ff860 100644 (file)
@@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 
                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
                if (!(adapter->flags & FULL_INIT_DONE))
                        return -EIO;    /* need the memory controllers */
                if (copy_from_user(&t, useraddr, sizeof(t)))
index aca9f7a..4144c23 100644 (file)
@@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
 static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        v = seq_tab_get_idx(seq->private, *pos + 1);
-       if (v)
-               ++*pos;
+       ++(*pos);
        return v;
 }
 
index e9e4500..1a16449 100644 (file)
@@ -678,8 +678,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
 static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        v = l2t_get_idx(seq, *pos);
-       if (v)
-               ++*pos;
+       ++(*pos);
        return v;
 }
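
Both hunks fix the same seq_file pitfall: a ->next handler must advance *pos unconditionally. If it returns NULL while leaving *pos untouched, the iteration position desynchronizes and entries can be dropped or repeated on subsequent reads. A minimal conforming iterator over a fixed array (foo_tab, foo_cnt and the foo_seq_* names are placeholders):

#include <linux/seq_file.h>

static int foo_tab[4] = { 1, 2, 3, 4 };
static const loff_t foo_cnt = 4;

static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos < foo_cnt ? &foo_tab[*pos] : NULL;
}

static void *foo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++(*pos);		/* always advance, even when at the end */
	return *pos < foo_cnt ? &foo_tab[*pos] : NULL;
}

static void foo_seq_stop(struct seq_file *seq, void *v)
{
}

static int foo_seq_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations foo_seq_ops = {
	.start	= foo_seq_start,
	.next	= foo_seq_next,
	.stop	= foo_seq_stop,
	.show	= foo_seq_show,
};
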
 
index 41c6fa2..e190187 100644 (file)
@@ -110,7 +110,7 @@ do {                                                                        \
 /* Interface Mode Register (IF_MODE) */
 
 #define IF_MODE_MASK           0x00000003 /* 30-31 Mask on i/f mode bits */
-#define IF_MODE_XGMII          0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_10G            0x00000000 /* 30-31 10G interface */
 #define IF_MODE_GMII           0x00000002 /* 30-31 GMII (1G) interface */
 #define IF_MODE_RGMII          0x00000004
 #define IF_MODE_RGMII_AUTO     0x00008000
@@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
        tmp = 0;
        switch (phy_if) {
        case PHY_INTERFACE_MODE_XGMII:
-               tmp |= IF_MODE_XGMII;
+               tmp |= IF_MODE_10G;
                break;
        default:
                tmp |= IF_MODE_GMII;
index e03b30c..c82c85e 100644 (file)
@@ -49,6 +49,7 @@ struct tgec_mdio_controller {
 struct mdio_fsl_priv {
        struct  tgec_mdio_controller __iomem *mdio_base;
        bool    is_little_endian;
+       bool    has_a011043;
 };
 
 static u32 xgmac_read32(void __iomem *regs,
@@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
                return ret;
 
        /* Return all Fs if nothing was there */
-       if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
+       if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+           !priv->has_a011043) {
                dev_err(&bus->dev,
                        "Error while reading PHY%d reg at %d.%hhu\n",
                        phy_id, dev_addr, regnum);
@@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
        priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
                                                       "little-endian");
 
+       priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
+                                                 "fsl,erratum-a011043");
+
        ret = of_mdiobus_register(bus, np);
        if (ret) {
                dev_err(&pdev->dev, "cannot register MDIO bus\n");
index d405503..45b90eb 100644 (file)
@@ -1113,7 +1113,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
         */
        pba_size--;
        if (pba_num_size < (((u32)pba_size * 2) + 1)) {
-               hw_dbg(hw, "Buffer to small for PBA data.\n");
+               hw_dbg(hw, "Buffer too small for PBA data.\n");
                return I40E_ERR_PARAM;
        }
 
index 778dab1..f260dd9 100644 (file)
@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
 
 struct tx_sync_info {
        u64 rcd_sn;
-       s32 sync_len;
+       u32 sync_len;
        int nr_frags;
        skb_frag_t frags[MAX_SKB_FRAGS];
 };
@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
 
 static enum mlx5e_ktls_sync_retval
 tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-                u32 tcp_seq, struct tx_sync_info *info)
+                u32 tcp_seq, int datalen, struct tx_sync_info *info)
 {
        struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
        enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
        struct tls_record_info *record;
        int remaining, i = 0;
        unsigned long flags;
+       bool ends_before;
 
        spin_lock_irqsave(&tx_ctx->lock, flags);
        record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
                goto out;
        }
 
-       if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-               ret = tls_record_is_start_marker(record) ?
-                       MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+       /* There are the following cases:
+        * 1. packet ends before start marker: bypass offload.
+        * 2. packet starts before start marker and ends after it: drop,
+        *    not supported, breaks the contract with the kernel.
+        * 3. packet ends before tls record info starts: drop,
+        *    this packet was already acknowledged and its record info
+        *    was released.
+        */
+       ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+
+       if (unlikely(tls_record_is_start_marker(record))) {
+               ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+               goto out;
+       } else if (ends_before) {
+               ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }
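
The new ends_before test is built on the TCP helper before(), which compares 32-bit sequence numbers in wraparound-safe modular arithmetic; its definition in include/net/tcp.h is essentially:

/* true iff seq1 precedes seq2, modulo 2^32 */
static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;
}

/* E.g. before(0xfffffff0, 0x10) is true: 0x10 lies 0x20 bytes ahead,
 * even though it is numerically smaller. */
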
 
@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
        u8 num_wqebbs;
        int i = 0;
 
-       ret = tx_sync_info_get(priv_tx, seq, &info);
+       ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
        if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
                if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
                        stats->tls_skip_no_sync_data++;
@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                goto err_out;
        }
 
-       if (unlikely(info.sync_len < 0)) {
-               if (likely(datalen <= -info.sync_len))
-                       return MLX5E_KTLS_SYNC_DONE;
-
-               stats->tls_drop_bypass_req++;
-               goto err_out;
-       }
-
        stats->tls_ooo++;
 
        tx_post_resync_params(sq, priv_tx, info.rcd_sn);
@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
        if (unlikely(contig_wqebbs_room < num_wqebbs))
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
-       tx_post_resync_params(sq, priv_tx, info.rcd_sn);
-
        for (; i < info.nr_frags; i++) {
                unsigned int orig_fsz, frag_offset = 0, n = 0;
                skb_frag_t *f = &info.frags[i];
@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
                enum mlx5e_ktls_sync_retval ret =
                        mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
 
-               if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+               switch (ret) {
+               case MLX5E_KTLS_SYNC_DONE:
                        *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
-               else if (ret == MLX5E_KTLS_SYNC_FAIL)
+                       break;
+               case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+                       if (likely(!skb->decrypted))
+                               goto out;
+                       WARN_ON_ONCE(1);
+                       /* fall-through */
+               default: /* MLX5E_KTLS_SYNC_FAIL */
                        goto err_out;
-               else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
-                       goto out;
+               }
        }
 
        priv_tx->expected_seq = seq + datalen;
index 024e1cd..7e32b9e 100644 (file)
@@ -4036,6 +4036,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
        u32 rate_mbps;
        int err;
 
+       vport_num = rpriv->rep->vport;
+       if (vport_num >= MLX5_VPORT_ECPF) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Ingress rate limit is supported only for Eswitch ports connected to VFs");
+               return -EOPNOTSUPP;
+       }
+
        esw = priv->mdev->priv.eswitch;
        /* rate is given in bytes/sec.
         * First convert to bits/sec and then round to the nearest mbit/secs.
@@ -4044,8 +4051,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
         * 1 mbit/sec.
         */
        rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
-       vport_num = rpriv->rep->vport;
-
        err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
index 2c965ad..3df3604 100644 (file)
@@ -1928,8 +1928,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
        struct mlx5_vport *vport;
        int i;
 
-       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
                memset(&vport->info, 0, sizeof(vport->info));
+               vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+       }
 }
 
 /* Public E-Switch API */
index 243a544..3e64127 100644 (file)
@@ -866,7 +866,7 @@ out:
  */
 #define ESW_SIZE (16 * 1024 * 1024)
 const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
-                                   64 * 1024, 4 * 1024 };
+                                   64 * 1024, 128 };
 
 static int
 get_sz_from_pool(struct mlx5_eswitch *esw)
@@ -1377,7 +1377,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
                return -EINVAL;
        }
 
-       mlx5_eswitch_disable(esw, false);
+       mlx5_eswitch_disable(esw, true);
        mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
        err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
        if (err) {
@@ -2220,7 +2220,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
 
 int esw_offloads_enable(struct mlx5_eswitch *esw)
 {
-       int err;
+       struct mlx5_vport *vport;
+       int err, i;
 
        if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
            MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
@@ -2237,6 +2238,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
        if (err)
                goto err_vport_metadata;
 
+       /* Representor will control the vport link state */
+       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+               vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
        err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
        if (err)
                goto err_vports;
@@ -2266,7 +2271,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 {
        int err, err1;
 
-       mlx5_eswitch_disable(esw, false);
+       mlx5_eswitch_disable(esw, true);
        err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
index cf7b8da..f554cfd 100644 (file)
@@ -1563,6 +1563,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x101d) },                      /* ConnectX-6 Dx */
        { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},   /* ConnectX Family mlx5Gen Virtual Function */
        { PCI_VDEVICE(MELLANOX, 0x101f) },                      /* ConnectX-6 LX */
+       { PCI_VDEVICE(MELLANOX, 0x1021) },                      /* ConnectX-7 */
        { PCI_VDEVICE(MELLANOX, 0xa2d2) },                      /* BlueField integrated ConnectX-5 network controller */
        { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},   /* BlueField integrated ConnectX-5 network controller VF */
        { PCI_VDEVICE(MELLANOX, 0xa2d6) },                      /* BlueField-2 integrated ConnectX-6 Dx network controller */
index 51803ee..c7f10d4 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2019 Mellanox Technologies. */
 
+#include <linux/smp.h>
 #include "dr_types.h"
 
 #define QUEUE_SIZE 128
@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        if (!in)
                goto err_cqwq;
 
-       vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+       vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
        err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
        if (err) {
                kvfree(in);
index 3d587d0..1e32e24 100644 (file)
@@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
        if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                list_for_each_entry(dst, &fte->node.children, node.list) {
                        enum mlx5_flow_destination_type type = dst->dest_attr.type;
-                       u32 id;
 
                        if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
                                err = -ENOSPC;
                                goto free_actions;
                        }
 
-                       switch (type) {
-                       case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
-                               id = dst->dest_attr.counter_id;
+                       if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
 
-                               tmp_action =
-                                       mlx5dr_action_create_flow_counter(id);
-                               if (!tmp_action) {
-                                       err = -ENOMEM;
-                                       goto free_actions;
-                               }
-                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                               actions[num_actions++] = tmp_action;
-                               break;
+                       switch (type) {
                        case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
                                tmp_action = create_ft_action(dev, dst);
                                if (!tmp_action) {
@@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                }
        }
 
+       if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+               list_for_each_entry(dst, &fte->node.children, node.list) {
+                       u32 id;
+
+                       if (dst->dest_attr.type !=
+                           MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
+
+                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                               err = -ENOSPC;
+                               goto free_actions;
+                       }
+
+                       id = dst->dest_attr.counter_id;
+                       tmp_action =
+                               mlx5dr_action_create_flow_counter(id);
+                       if (!tmp_action) {
+                               err = -ENOMEM;
+                               goto free_actions;
+                       }
+
+                       fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                       actions[num_actions++] = tmp_action;
+               }
+       }
+
        params.match_sz = match_sz;
        params.match_buf = (u64 *)fte->val;
 
index 150b3a1..3d3cca5 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/string.h>
 #include <linux/rhashtable.h>
 #include <linux/netdevice.h>
+#include <linux/mutex.h>
 #include <net/net_namespace.h>
 #include <net/tc_act/tc_vlan.h>
 
@@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
        struct mlxsw_sp_fid *dummy_fid;
        struct rhashtable ruleset_ht;
        struct list_head rules;
+       struct mutex rules_lock; /* Protects rules list */
        struct {
                struct delayed_work dw;
                unsigned long interval; /* ms */
@@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
                        goto err_ruleset_block_bind;
        }
 
+       mutex_lock(&mlxsw_sp->acl->rules_lock);
        list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+       mutex_unlock(&mlxsw_sp->acl->rules_lock);
        block->rule_count++;
        block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
        return 0;
@@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
 
        block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
        ruleset->ht_key.block->rule_count--;
+       mutex_lock(&mlxsw_sp->acl->rules_lock);
        list_del(&rule->list);
+       mutex_unlock(&mlxsw_sp->acl->rules_lock);
        if (!ruleset->ht_key.chain_index &&
            mlxsw_sp_acl_ruleset_is_singular(ruleset))
                mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
@@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
        struct mlxsw_sp_acl_rule *rule;
        int err;
 
-       /* Protect internal structures from changes */
-       rtnl_lock();
+       mutex_lock(&acl->rules_lock);
        list_for_each_entry(rule, &acl->rules, list) {
                err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
                                                        rule);
                if (err)
                        goto err_rule_update;
        }
-       rtnl_unlock();
+       mutex_unlock(&acl->rules_lock);
        return 0;
 
 err_rule_update:
-       rtnl_unlock();
+       mutex_unlock(&acl->rules_lock);
        return err;
 }
 
@@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        acl->dummy_fid = fid;
 
        INIT_LIST_HEAD(&acl->rules);
+       mutex_init(&acl->rules_lock);
        err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
        if (err)
                goto err_acl_ops_init;
@@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        return 0;
 
 err_acl_ops_init:
+       mutex_destroy(&acl->rules_lock);
        mlxsw_sp_fid_put(fid);
 err_fid_get:
        rhashtable_destroy(&acl->ruleset_ht);
@@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
 
        cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
        mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
+       mutex_destroy(&acl->rules_lock);
        WARN_ON(!list_empty(&acl->rules));
        mlxsw_sp_fid_put(acl->dummy_fid);
        rhashtable_destroy(&acl->ruleset_ht);
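
Taking a driver-private mutex instead of the RTNL keeps the periodic activity walk off the global lock; the walk previously ran under the RTNL (itself a mutex), so it may sleep, which is why a mutex rather than a spinlock is used. The lifecycle mirrors the hunks above, sketched with placeholder foo_* names:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct foo_rule { struct list_head list; };

struct foo_acl {
	struct list_head rules;
	struct mutex rules_lock;	/* protects rules */
};

static void foo_acl_init(struct foo_acl *acl)
{
	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
}

static void foo_acl_rule_add(struct foo_acl *acl, struct foo_rule *rule)
{
	mutex_lock(&acl->rules_lock);
	list_add_tail(&rule->list, &acl->rules);
	mutex_unlock(&acl->rules_lock);
}

static void foo_acl_fini(struct foo_acl *acl)
{
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
}
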
index b339125..05e7604 100644 (file)
@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
 
        netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
 
+       spin_lock_init(&lp->lock);
+
        for (i = 0; i < SONIC_NUM_RRS; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
                if (skb == NULL) {
@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
        return 0;
 }
 
+/* Wait for the SONIC to become idle. */
+static void sonic_quiesce(struct net_device *dev, u16 mask)
+{
+       struct sonic_local * __maybe_unused lp = netdev_priv(dev);
+       int i;
+       u16 bits;
+
+       for (i = 0; i < 1000; ++i) {
+               bits = SONIC_READ(SONIC_CMD) & mask;
+               if (!bits)
+                       return;
+               if (irqs_disabled() || in_interrupt())
+                       udelay(20);
+               else
+                       usleep_range(100, 200);
+       }
+       WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
+}
 
 /*
  * Close the SONIC device
@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
        /*
         * stop the SONIC, disable interrupts
         */
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+       sonic_quiesce(dev, SONIC_CR_ALL);
+
        SONIC_WRITE(SONIC_IMR, 0);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
         * put the Sonic into software-reset mode and
         * disable all interrupts before releasing DMA buffers
         */
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+       sonic_quiesce(dev, SONIC_CR_ALL);
+
        SONIC_WRITE(SONIC_IMR, 0);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
  *   wake the tx queue
  * Concurrently with all of this, the SONIC is potentially writing to
  * the status flags of the TDs.
- * Until some mutual exclusion is added, this code will not work with SMP. However,
- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
  */
 
 static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
        struct sonic_local *lp = netdev_priv(dev);
        dma_addr_t laddr;
        int length;
-       int entry = lp->next_tx;
+       int entry;
+       unsigned long flags;
 
        netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
 
@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       spin_lock_irqsave(&lp->lock, flags);
+
+       entry = lp->next_tx;
+
        sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
        sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
        sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
        sonic_tda_put(dev, entry, SONIC_TD_LINK,
                sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
 
-       /*
-        * Must set tx_skb[entry] only after clearing status, and
-        * before clearing EOL and before stopping queue
-        */
        wmb();
        lp->tx_len[entry] = length;
        lp->tx_laddr[entry] = laddr;
@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
 
        SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
 
+       spin_unlock_irqrestore(&lp->lock, flags);
+
        return NETDEV_TX_OK;
 }
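
Everything the transmit path and the interrupt handler now share (ring indices, tx_skb[], statistics) sits under lp->lock with local IRQs disabled. As the comment added to sonic_interrupt() in the next hunk explains, the handler itself may be re-entered, so both sides use the _irqsave variant. The canonical shape, with placeholder foo_* names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv { spinlock_t lock; /* guards the descriptor rings */ };

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fp->lock, flags);	/* excludes foo_interrupt() */
	/* ... map skb, fill one descriptor, kick the hardware ... */
	spin_unlock_irqrestore(&fp->lock, flags);
	return NETDEV_TX_OK;
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *fp = netdev_priv((struct net_device *)dev_id);
	unsigned long flags;

	/* _irqsave here too: a re-entrant handler cannot assume that
	 * interrupts were enabled on entry. */
	spin_lock_irqsave(&fp->lock, flags);
	/* ... reap completed descriptors, wake the queue ... */
	spin_unlock_irqrestore(&fp->lock, flags);
	return IRQ_HANDLED;
}
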
 
@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
        struct net_device *dev = dev_id;
        struct sonic_local *lp = netdev_priv(dev);
        int status;
+       unsigned long flags;
+
+       /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
+        * with sonic_send_packet() so that the two functions can share state.
+        * Secondly, it makes sonic_interrupt() re-entrant, as that is required
+        * by macsonic which must use two IRQs with different priority levels.
+        */
+       spin_lock_irqsave(&lp->lock, flags);
+
+       status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+       if (!status) {
+               spin_unlock_irqrestore(&lp->lock, flags);
 
-       if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
                return IRQ_NONE;
+       }
 
        do {
+               SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
+
                if (status & SONIC_INT_PKTRX) {
                        netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
                        sonic_rx(dev);  /* got packet(s) */
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
                }
 
                if (status & SONIC_INT_TXDN) {
@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                        int td_status;
                        int freed_some = 0;
 
-                       /* At this point, cur_tx is the index of a TD that is one of:
-                        *   unallocated/freed                          (status set   & tx_skb[entry] clear)
-                        *   allocated and sent                         (status set   & tx_skb[entry] set  )
-                        *   allocated and not yet sent                 (status clear & tx_skb[entry] set  )
-                        *   still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+                       /* The state of a Transmit Descriptor may be inferred
+                        * from { tx_skb[entry], td_status } as follows.
+                        * { clear, clear } => the TD has never been used
+                        * { set,   clear } => the TD was handed to SONIC
+                        * { set,   set   } => the TD was handed back
+                        * { clear, set   } => the TD is available for re-use
                         */
 
                        netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                                if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
                                        break;
 
-                               if (td_status & 0x0001) {
+                               if (td_status & SONIC_TCR_PTX) {
                                        lp->stats.tx_packets++;
                                        lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
                                } else {
-                                       lp->stats.tx_errors++;
-                                       if (td_status & 0x0642)
+                                       if (td_status & (SONIC_TCR_EXD |
+                                           SONIC_TCR_EXC | SONIC_TCR_BCM))
                                                lp->stats.tx_aborted_errors++;
-                                       if (td_status & 0x0180)
+                                       if (td_status &
+                                           (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
                                                lp->stats.tx_carrier_errors++;
-                                       if (td_status & 0x0020)
+                                       if (td_status & SONIC_TCR_OWC)
                                                lp->stats.tx_window_errors++;
-                                       if (td_status & 0x0004)
+                                       if (td_status & SONIC_TCR_FU)
                                                lp->stats.tx_fifo_errors++;
                                }
 
@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                        if (freed_some || lp->tx_skb[entry] == NULL)
                                netif_wake_queue(dev);  /* The ring is no longer full */
                        lp->cur_tx = entry;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
                }
 
                /*
@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                if (status & SONIC_INT_RFO) {
                        netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
                                  __func__);
-                       lp->stats.rx_fifo_errors++;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
                }
                if (status & SONIC_INT_RDE) {
                        netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
                                  __func__);
-                       lp->stats.rx_dropped++;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
                }
                if (status & SONIC_INT_RBAE) {
                        netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
                                  __func__);
-                       lp->stats.rx_dropped++;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
                }
 
                /* counter overruns; all counters are 16bit wide */
-               if (status & SONIC_INT_FAE) {
+               if (status & SONIC_INT_FAE)
                        lp->stats.rx_frame_errors += 65536;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
-               }
-               if (status & SONIC_INT_CRC) {
+               if (status & SONIC_INT_CRC)
                        lp->stats.rx_crc_errors += 65536;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
-               }
-               if (status & SONIC_INT_MP) {
+               if (status & SONIC_INT_MP)
                        lp->stats.rx_missed_errors += 65536;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
-               }
 
                /* transmit error */
                if (status & SONIC_INT_TXER) {
-                       if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
-                               netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
-                                         __func__);
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+                       u16 tcr = SONIC_READ(SONIC_TCR);
+
+                       netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
+                                 __func__, tcr);
+
+                       if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
+                                  SONIC_TCR_FU | SONIC_TCR_BCM)) {
+                               /* Aborted transmission. Try again. */
+                               netif_stop_queue(dev);
+                               SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+                       }
                }
 
                /* bus retry */
@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                        /* ... to help debug DMA problems causing endless interrupts. */
                        /* Bounce the eth interface to turn on the interrupt again. */
                        SONIC_WRITE(SONIC_IMR, 0);
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
                }
 
-               /* load CAM done */
-               if (status & SONIC_INT_LCD)
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
-       } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
+               status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+       } while (status);
+
+       spin_unlock_irqrestore(&lp->lock, flags);
+
        return IRQ_HANDLED;
 }
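The reworked handler above holds lp->lock for the whole pass and acknowledges ISR bits before servicing them, so an event that fires mid-service re-asserts its bit and is picked up by the re-read at the bottom of the loop instead of being lost. A minimal sketch of that ack-then-handle shape, with hypothetical names (example_priv, read_status(), ack_status(), handle_events() are illustrative, not part of the driver):

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            struct example_priv *priv = dev_id;
            unsigned long flags;
            irqreturn_t ret = IRQ_NONE;
            u32 status;

            spin_lock_irqsave(&priv->lock, flags);
            while ((status = read_status(priv) & priv->irq_mask)) {
                    ack_status(priv, status);    /* clear first ...          */
                    handle_events(priv, status); /* ... so new events relatch */
                    ret = IRQ_HANDLED;
            }
            spin_unlock_irqrestore(&priv->lock, flags);
            return ret;
    }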
 
+/* Return the array index corresponding to a given Receive Buffer pointer. */
+static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
+                          unsigned int last)
+{
+       unsigned int i = last;
+
+       do {
+               i = (i + 1) & SONIC_RRS_MASK;
+               if (addr == lp->rx_laddr[i])
+                       return i;
+       } while (i != last);
+
+       return -ENOENT;
+}
+
+/* Allocate and map a new skb to be used as a receive buffer. */
+static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
+                          struct sk_buff **new_skb, dma_addr_t *new_addr)
+{
+       *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+       if (!*new_skb)
+               return false;
+
+       if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+               skb_reserve(*new_skb, 2);
+
+       *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
+                                  SONIC_RBSIZE, DMA_FROM_DEVICE);
+       if (!*new_addr) {
+               dev_kfree_skb(*new_skb);
+               *new_skb = NULL;
+               return false;
+       }
+
+       return true;
+}
+
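sonic_alloc_rb() treats a zero handle as a mapping failure, which holds on the platforms this chip appears on; the portable DMA-API idiom is dma_mapping_error(). A sketch of that check (dev, buf, len and skb are placeholders):

    dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

    if (dma_mapping_error(dev, addr)) {
            dev_kfree_skb(skb);     /* keep the old buffer in service */
            return false;
    }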
+/* Place a new receive resource in the Receive Resource Area and update RWP. */
+static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
+                            dma_addr_t old_addr, dma_addr_t new_addr)
+{
+       unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
+       unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
+       u32 buf;
+
+       /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
+        * scans the other resources in the RRA, those in the range [RWP, RRP).
+        */
+       do {
+               buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
+                     sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
+
+               if (buf == old_addr)
+                       break;
+
+               entry = (entry + 1) & SONIC_RRS_MASK;
+       } while (entry != end);
+
+       WARN_ONCE(buf != old_addr, "failed to find resource!\n");
+
+       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
+       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
+
+       entry = (entry + 1) & SONIC_RRS_MASK;
+
+       SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
+}
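To make the [RRP, RWP) ownership comment concrete, a comment-style trace with a hypothetical 16-entry RRA (mask 0xf):

    /* RRP -> entry 5, RWP -> entry 12:
     *   chip owns [5, 12)    = entries 5..11
     *   driver scans [12, 5) = 12, 13, 14, 15, 0, 1, 2, 3, 4
     * After the swap, RWP advances one past the refreshed entry,
     * returning it to the chip's half of the ring.
     */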
+
 /*
  * We have a good packet(s), pass it/them up the network stack.
  */
 static void sonic_rx(struct net_device *dev)
 {
        struct sonic_local *lp = netdev_priv(dev);
-       int status;
        int entry = lp->cur_rx;
+       int prev_entry = lp->eol_rx;
+       bool rbe = false;
 
        while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
-               struct sk_buff *used_skb;
-               struct sk_buff *new_skb;
-               dma_addr_t new_laddr;
-               u16 bufadr_l;
-               u16 bufadr_h;
-               int pkt_len;
-
-               status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
-               if (status & SONIC_RCR_PRX) {
-                       /* Malloc up new buffer. */
-                       new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
-                       if (new_skb == NULL) {
-                               lp->stats.rx_dropped++;
+               u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+
+               /* If the RD has LPKT set, the chip has finished with the RB */
+               if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
+                       struct sk_buff *new_skb;
+                       dma_addr_t new_laddr;
+                       u32 addr = (sonic_rda_get(dev, entry,
+                                                 SONIC_RD_PKTPTR_H) << 16) |
+                                  sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+                       int i = index_from_addr(lp, addr, entry);
+
+                       if (i < 0) {
+                               WARN_ONCE(1, "failed to find buffer!\n");
                                break;
                        }
-                       /* provide 16 byte IP header alignment unless DMA requires otherwise */
-                       if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
-                               skb_reserve(new_skb, 2);
-
-                       new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
-                                              SONIC_RBSIZE, DMA_FROM_DEVICE);
-                       if (!new_laddr) {
-                               dev_kfree_skb(new_skb);
-                               printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
+
+                       if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
+                               struct sk_buff *used_skb = lp->rx_skb[i];
+                               int pkt_len;
+
+                               /* Pass the used buffer up the stack */
+                               dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
+                                                DMA_FROM_DEVICE);
+
+                               pkt_len = sonic_rda_get(dev, entry,
+                                                       SONIC_RD_PKTLEN);
+                               skb_trim(used_skb, pkt_len);
+                               used_skb->protocol = eth_type_trans(used_skb,
+                                                                   dev);
+                               netif_rx(used_skb);
+                               lp->stats.rx_packets++;
+                               lp->stats.rx_bytes += pkt_len;
+
+                               lp->rx_skb[i] = new_skb;
+                               lp->rx_laddr[i] = new_laddr;
+                       } else {
+                               /* Failed to obtain a new buffer so re-use it */
+                               new_laddr = addr;
                                lp->stats.rx_dropped++;
-                               break;
                        }
-
-                       /* now we have a new skb to replace it, pass the used one up the stack */
-                       dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
-                       used_skb = lp->rx_skb[entry];
-                       pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
-                       skb_trim(used_skb, pkt_len);
-                       used_skb->protocol = eth_type_trans(used_skb, dev);
-                       netif_rx(used_skb);
-                       lp->stats.rx_packets++;
-                       lp->stats.rx_bytes += pkt_len;
-
-                       /* and insert the new skb */
-                       lp->rx_laddr[entry] = new_laddr;
-                       lp->rx_skb[entry] = new_skb;
-
-                       bufadr_l = (unsigned long)new_laddr & 0xffff;
-                       bufadr_h = (unsigned long)new_laddr >> 16;
-                       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
-                       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
-               } else {
-                       /* This should only happen, if we enable accepting broken packets. */
-                       lp->stats.rx_errors++;
-                       if (status & SONIC_RCR_FAER)
-                               lp->stats.rx_frame_errors++;
-                       if (status & SONIC_RCR_CRCR)
-                               lp->stats.rx_crc_errors++;
-               }
-               if (status & SONIC_RCR_LPKT) {
-                       /*
-                        * this was the last packet out of the current receive buffer
-                        * give the buffer back to the SONIC
+                       /* If RBE is already asserted when RWP advances then
+                        * it's safe to clear RBE after processing this packet.
                         */
-                       lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
-                       if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
-                       SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
-                       if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
-                               netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
-                                         __func__);
-                               SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
-                       }
-               } else
-                       printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
-                            dev->name);
+                       rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
+                       sonic_update_rra(dev, lp, addr, new_laddr);
+               }
                /*
                 * give back the descriptor
                 */
-               sonic_rda_put(dev, entry, SONIC_RD_LINK,
-                       sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+               sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
                sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
-               sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
-                       sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
-               lp->eol_rx = entry;
-               lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
+
+               prev_entry = entry;
+               entry = (entry + 1) & SONIC_RDS_MASK;
+       }
+
+       lp->cur_rx = entry;
+
+       if (prev_entry != lp->eol_rx) {
+               /* Advance the EOL flag to put descriptors back into service */
+               sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
+                             sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
+               sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
+                             sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
+               lp->eol_rx = prev_entry;
        }
+
+       if (rbe)
+               SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
        /*
         * If any worth-while packets have been received, netif_rx()
         * has done a mark_bh(NET_BH) for us and will work on them
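The tail of sonic_rx() above moves the EOL (end-of-list) marker once per batch instead of once per descriptor, and sets the new EOL before clearing the old one, so the chip never observes a ring with no end marker. A trace with a hypothetical 4-descriptor ring:

    /* eol_rx == 3; entries 0 and 1 just processed, so prev_entry == 1:
     *   1) set EOL on RD 1    (ring briefly has EOLs at 1 and 3)
     *   2) clear EOL on RD 3  (single EOL again, now at 1)
     * eol_rx = 1; the descriptors after RD 1 are back in service.
     */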
@@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
                    (netdev_mc_count(dev) > 15)) {
                        rcr |= SONIC_RCR_AMC;
                } else {
+                       unsigned long flags;
+
                        netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
                                  netdev_mc_count(dev));
                        sonic_set_cam_enable(dev, 1);  /* always enable our own address */
@@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
                                i++;
                        }
                        SONIC_WRITE(SONIC_CDC, 16);
-                       /* issue Load CAM command */
                        SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+
+                       /* LCAM and TXP commands can't be used simultaneously */
+                       spin_lock_irqsave(&lp->lock, flags);
+                       sonic_quiesce(dev, SONIC_CR_TXP);
                        SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+                       sonic_quiesce(dev, SONIC_CR_LCAM);
+                       spin_unlock_irqrestore(&lp->lock, flags);
                }
        }
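sonic_quiesce() is called here and throughout the rework, but its body lies outside this excerpt. A plausible minimal sketch, assuming it simply polls SONIC_CMD until the requested command bits self-clear (the timeout and delay values are illustrative):

    static void sonic_quiesce(struct net_device *dev, u16 mask)
    {
            /* some ports' SONIC_READ() references lp */
            struct sonic_local * __maybe_unused lp = netdev_priv(dev);
            int i;

            for (i = 0; i < 1000; i++) {
                    if (!(SONIC_READ(SONIC_CMD) & mask))
                            return;
                    udelay(20);     /* callers may hold lp->lock */
            }
            WARN_ONCE(1, "SONIC command 0x%04x did not complete\n", mask);
    }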
 
@@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
  */
 static int sonic_init(struct net_device *dev)
 {
-       unsigned int cmd;
        struct sonic_local *lp = netdev_priv(dev);
        int i;
 
@@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
 
+       /* While in reset mode, clear CAM Enable register */
+       SONIC_WRITE(SONIC_CE, 0);
+
        /*
         * clear software reset flag, disable receiver, clear and
         * enable interrupts, then completely initialize the SONIC
         */
        SONIC_WRITE(SONIC_CMD, 0);
-       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
+       sonic_quiesce(dev, SONIC_CR_ALL);
 
        /*
         * initialize the receive resource area
@@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
        }
 
        /* initialize all RRA registers */
-       lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
-                                       SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-       lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
-                                       SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-
-       SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
-       SONIC_WRITE(SONIC_REA, lp->rra_end);
-       SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
-       SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+       SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
+       SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
+       SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
+       SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
        SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
        SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
 
@@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
        netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
 
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
-       i = 0;
-       while (i++ < 100) {
-               if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
-                       break;
-       }
-
-       netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
-                 SONIC_READ(SONIC_CMD), i);
+       sonic_quiesce(dev, SONIC_CR_RRRA);
 
        /*
         * Initialize the receive descriptors so that they
@@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
         * load the CAM
         */
        SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
-
-       i = 0;
-       while (i++ < 100) {
-               if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
-                       break;
-       }
-       netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
-                 SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
+       sonic_quiesce(dev, SONIC_CR_LCAM);
 
        /*
         * enable receiver, disable loopback
         * and enable all interrupts
         */
-       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
        SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
        SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
-
-       cmd = SONIC_READ(SONIC_CMD);
-       if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
-               printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
 
        netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
                  SONIC_READ(SONIC_CMD));
index 2b27f70..1df6d2f 100644 (file)
 #define SONIC_CR_TXP            0x0002
 #define SONIC_CR_HTX            0x0001
 
+#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
+                     SONIC_CR_RXEN | SONIC_CR_TXP)
+
 /*
  * SONIC data configuration bits
  */
 #define SONIC_TCR_NCRS          0x0100
 #define SONIC_TCR_CRLS          0x0080
 #define SONIC_TCR_EXC           0x0040
+#define SONIC_TCR_OWC           0x0020
 #define SONIC_TCR_PMB           0x0008
 #define SONIC_TCR_FU            0x0004
 #define SONIC_TCR_BCM           0x0002
 #define SONIC_NUM_RDS   SONIC_NUM_RRS /* number of receive descriptors */
 #define SONIC_NUM_TDS   16            /* number of transmit descriptors */
 
-#define SONIC_RDS_MASK  (SONIC_NUM_RDS-1)
-#define SONIC_TDS_MASK  (SONIC_NUM_TDS-1)
+#define SONIC_RRS_MASK  (SONIC_NUM_RRS - 1)
+#define SONIC_RDS_MASK  (SONIC_NUM_RDS - 1)
+#define SONIC_TDS_MASK  (SONIC_NUM_TDS - 1)
 
 #define SONIC_RBSIZE   1520          /* size of one resource buffer */
 
@@ -312,8 +317,6 @@ struct sonic_local {
        u32 rda_laddr;              /* logical DMA address of RDA */
        dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
        dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
-       unsigned int rra_end;
-       unsigned int cur_rwp;
        unsigned int cur_rx;
        unsigned int cur_tx;           /* first unacked transmit packet */
        unsigned int eol_rx;
@@ -322,6 +325,7 @@ struct sonic_local {
        int msg_enable;
        struct device *device;         /* generic device */
        struct net_device_stats stats;
+       spinlock_t lock;
 };
 
 #define TX_TIMEOUT (3 * HZ)
@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
    as far as we can tell. */
 /* OpenBSD calls this "SWO".  I'd like to think that sonic_buf_put()
    is a much better name. */
-static inline void sonic_buf_put(void* base, int bitmode,
+static inline void sonic_buf_put(u16 *base, int bitmode,
                                 int offset, __u16 val)
 {
        if (bitmode)
 #ifdef __BIG_ENDIAN
-               ((__u16 *) base + (offset*2))[1] = val;
+               __raw_writew(val, base + (offset * 2) + 1);
 #else
-               ((__u16 *) base + (offset*2))[0] = val;
+               __raw_writew(val, base + (offset * 2) + 0);
 #endif
        else
-               ((__u16 *) base)[offset] = val;
+               __raw_writew(val, base + (offset * 1) + 0);
 }
 
-static inline __u16 sonic_buf_get(void* base, int bitmode,
+static inline __u16 sonic_buf_get(u16 *base, int bitmode,
                                  int offset)
 {
        if (bitmode)
 #ifdef __BIG_ENDIAN
-               return ((volatile __u16 *) base + (offset*2))[1];
+               return __raw_readw(base + (offset * 2) + 1);
 #else
-               return ((volatile __u16 *) base + (offset*2))[0];
+               return __raw_readw(base + (offset * 2) + 0);
 #endif
        else
-               return ((volatile __u16 *) base)[offset];
+               return __raw_readw(base + (offset * 1) + 0);
 }
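Switching from casted pointer dereferences to __raw_writew()/__raw_readw() keeps the descriptor accesses single, untorn 16-bit operations in CPU-native byte order, without the byte swapping or ordering of writew()/readw(). Rough equivalences, for orientation only:

    /* __raw_writew(v, p)  ~  WRITE_ONCE(*(u16 *)p, v)   no swap, no barrier
     * __raw_readw(p)      ~  READ_ONCE(*(u16 *)p)
     * writew(v, p)        additionally does cpu_to_le16() and ordering
     */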
 
 /* Inlines that you should actually use for reading/writing DMA buffers */
@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
                             (entry * SIZEOF_SONIC_RR) + offset);
 }
 
+static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
+{
+       struct sonic_local *lp = netdev_priv(dev);
+
+       return lp->rra_laddr +
+              entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
+}
+
+static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
+{
+       struct sonic_local *lp = netdev_priv(dev);
+
+       return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
+                                             SONIC_BUS_SCALE(lp->dma_bitmode));
+}
+
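A worked example of the helper arithmetic, assuming hypothetical values (rra_laddr low word 0x8000, SIZEOF_SONIC_RR == 4, 32-bit mode so SONIC_BUS_SCALE() == 4, i.e. 16 bytes per entry):

    /* sonic_rr_addr(dev, 3)       == 0x8000 + 3 * 16          == 0x8030
     * sonic_rr_entry(dev, 0x8030) == (0x8030 - 0x8000) / 16   == 3
     */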
 static const char version[] =
     "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
 
index a496390..07f9067 100644 (file)
@@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
                        break;
                }
                entry += p_hdr->size;
+               cond_resched();
        }
        p_dev->ahw->reset.seq_index = index;
 }
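This hunk and the two qlcnic hunks that follow drop cond_resched() into long firmware template and dump loops, so a slow pass does not monopolize the CPU on non-preemptible kernels. The shape of the pattern, with hypothetical names:

    for (i = 0; i < huge_count; i++) {
            process_one_entry(i);   /* slow register or memory I/O */
            cond_resched();         /* yield if a reschedule is due */
    }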
index afa10a1..f34ae8c 100644 (file)
@@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
                addr += 16;
                reg_read -= 16;
                ret += 16;
+               cond_resched();
        }
 out:
        mutex_unlock(&adapter->ahw->mem_lock);
@@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
                buf_offset += entry->hdr.cap_size;
                entry_offset += entry->hdr.offset;
                buffer = fw_dump->data + buf_offset;
+               cond_resched();
        }
 
        fw_dump->clr = 1;
index 4775f49..d10ac54 100644 (file)
@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
                *mac = NULL;
        }
 
-       rc = of_get_phy_mode(np, &plat->phy_interface);
-       if (rc)
-               return ERR_PTR(rc);
+       plat->phy_interface = device_get_phy_mode(&pdev->dev);
+       if (plat->phy_interface < 0)
+               return ERR_PTR(plat->phy_interface);
 
        plat->interface = stmmac_of_get_mac_mode(np);
        if (plat->interface < 0)
index f6222ad..9b3ba98 100644 (file)
@@ -804,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
                return NULL;
        }
 
-       if (sock->sk->sk_protocol != IPPROTO_UDP) {
+       sk = sock->sk;
+       if (sk->sk_protocol != IPPROTO_UDP ||
+           sk->sk_type != SOCK_DGRAM ||
+           (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
                pr_debug("socket fd=%d not UDP\n", fd);
                sk = ERR_PTR(-EINVAL);
                goto out_sock;
        }
 
-       lock_sock(sock->sk);
-       if (sock->sk->sk_user_data) {
+       lock_sock(sk);
+       if (sk->sk_user_data) {
                sk = ERR_PTR(-EBUSY);
                goto out_rel_sock;
        }
 
-       sk = sock->sk;
        sock_hold(sk);
 
        tuncfg.sk_user_data = gtp;
index 2a91c19..61d7e0d 100644 (file)
@@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
  */
 static void slip_write_wakeup(struct tty_struct *tty)
 {
-       struct slip *sl = tty->disc_data;
+       struct slip *sl;
+
+       rcu_read_lock();
+       sl = rcu_dereference(tty->disc_data);
+       if (!sl)
+               goto out;
 
        schedule_work(&sl->tx_work);
+out:
+       rcu_read_unlock();
 }
 
 static void sl_tx_timeout(struct net_device *dev)
@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
                return;
 
        spin_lock_bh(&sl->lock);
-       tty->disc_data = NULL;
+       rcu_assign_pointer(tty->disc_data, NULL);
        sl->tty = NULL;
        spin_unlock_bh(&sl->lock);
 
+       synchronize_rcu();
        flush_work(&sl->tx_work);
 
        /* VSV = very important to remove timers */
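The slip change closes a race between slip_write_wakeup() (atomic context, called by the tty layer) and slip_close(): disc_data is now retired under RCU, and synchronize_rcu() guarantees no reader still holds the pointer before tx_work is flushed. Condensed, the pattern the hunk implements is:

    /* reader (slip_write_wakeup, atomic context) */
    rcu_read_lock();
    sl = rcu_dereference(tty->disc_data);
    if (sl)
            schedule_work(&sl->tx_work);
    rcu_read_unlock();

    /* writer (slip_close) */
    rcu_assign_pointer(tty->disc_data, NULL);
    synchronize_rcu();          /* all readers above have finished   */
    flush_work(&sl->tx_work);   /* nothing can requeue it any more   */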
index 683d371..35e884a 100644 (file)
@@ -1936,6 +1936,10 @@ drop:
                        if (ret != XDP_PASS) {
                                rcu_read_unlock();
                                local_bh_enable();
+                               if (frags) {
+                                       tfile->napi.skb = NULL;
+                                       mutex_unlock(&tfile->napi_mutex);
+                               }
                                return total_len;
                        }
                }
index 75bdfae..c2a58f0 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/mdio.h>
 #include <linux/phy.h>
 #include <net/ip6_checksum.h>
+#include <net/vxlan.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
@@ -3668,6 +3669,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
        tasklet_schedule(&dev->bh);
 }
 
+static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
+                                               struct net_device *netdev,
+                                               netdev_features_t features)
+{
+       if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+               features &= ~NETIF_F_GSO_MASK;
+
+       features = vlan_features_check(skb, features);
+       features = vxlan_features_check(skb, features);
+
+       return features;
+}
+
 static const struct net_device_ops lan78xx_netdev_ops = {
        .ndo_open               = lan78xx_open,
        .ndo_stop               = lan78xx_stop,
@@ -3681,6 +3695,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
        .ndo_set_features       = lan78xx_set_features,
        .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
+       .ndo_features_check     = lan78xx_features_check,
 };
 
 static void lan78xx_stat_monitor(struct timer_list *t)
index 031cb8f..3f425f9 100644 (file)
@@ -31,7 +31,7 @@
 #define NETNEXT_VERSION                "11"
 
 /* Information for net */
-#define NET_VERSION            "10"
+#define NET_VERSION            "11"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -68,6 +68,7 @@
 #define PLA_LED_FEATURE                0xdd92
 #define PLA_PHYAR              0xde00
 #define PLA_BOOT_CTRL          0xe004
+#define PLA_LWAKE_CTRL_REG     0xe007
 #define PLA_GPHY_INTR_IMR      0xe022
 #define PLA_EEE_CR             0xe040
 #define PLA_EEEP_CR            0xe080
@@ -95,6 +96,7 @@
 #define PLA_TALLYCNT           0xe890
 #define PLA_SFF_STS_7          0xe8de
 #define PLA_PHYSTATUS          0xe908
+#define PLA_CONFIG6            0xe90a /* CONFIG6 */
 #define PLA_BP_BA              0xfc26
 #define PLA_BP_0               0xfc28
 #define PLA_BP_1               0xfc2a
 #define PLA_BP_EN              0xfc38
 
 #define USB_USB2PHY            0xb41e
+#define USB_SSPHYLINK1         0xb426
 #define USB_SSPHYLINK2         0xb428
 #define USB_U2P3_CTRL          0xb460
 #define USB_CSR_DUMMY1         0xb464
 #define LINK_ON_WAKE_EN                0x0010
 #define LINK_OFF_WAKE_EN       0x0008
 
+/* PLA_CONFIG6 */
+#define LANWAKE_CLR_EN         BIT(0)
+
 /* PLA_CONFIG5 */
 #define BWF_EN                 0x0040
 #define MWF_EN                 0x0020
 /* PLA_PHY_PWR */
 #define TX_10M_IDLE_EN         0x0080
 #define PFM_PWM_SWITCH         0x0040
+#define TEST_IO_OFF            BIT(4)
 
 /* PLA_MAC_PWR_CTRL */
 #define D3_CLK_GATED_EN                0x00004000
 #define MAC_CLK_SPDWN_EN       BIT(15)
 
 /* PLA_MAC_PWR_CTRL3 */
+#define PLA_MCU_SPDWN_EN       BIT(14)
 #define PKT_AVAIL_SPDWN_EN     0x0100
 #define SUSPEND_SPDWN_EN       0x0004
 #define U1U2_SPDWN_EN          0x0002
 /* PLA_BOOT_CTRL */
 #define AUTOLOAD_DONE          0x0002
 
+/* PLA_LWAKE_CTRL_REG */
+#define LANWAKE_PIN            BIT(7)
+
 /* PLA_SUSPEND_FLAG */
 #define LINK_CHG_EVENT         BIT(0)
 
 #define DEBUG_LTSSM            0x0082
 
 /* PLA_EXTRA_STATUS */
+#define CUR_LINK_OK            BIT(15)
 #define U3P3_CHECK_EN          BIT(7)  /* RTL_VER_05 only */
 #define LINK_CHANGE_FLAG       BIT(8)
+#define POLL_LINK_CHG          BIT(0)
 
 /* USB_USB2PHY */
 #define USB2PHY_SUSPEND                0x0001
 #define USB2PHY_L1             0x0002
 
+/* USB_SSPHYLINK1 */
+#define DELAY_PHY_PWR_CHG      BIT(1)
+
 /* USB_SSPHYLINK2 */
 #define pwd_dn_scale_mask      0x3ffe
 #define pwd_dn_scale(x)                ((x) << 1)
@@ -2863,6 +2879,17 @@ static int rtl8153_enable(struct r8152 *tp)
        r8153_set_rx_early_timeout(tp);
        r8153_set_rx_early_size(tp);
 
+       if (tp->version == RTL_VER_09) {
+               u32 ocp_data;
+
+               ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+               ocp_data &= ~FC_PATCH_TASK;
+               ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+               usleep_range(1000, 2000);
+               ocp_data |= FC_PATCH_TASK;
+               ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+       }
+
        return rtl_enable(tp);
 }
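Nearly every r8152 hunk in this series is the same read-modify-write over the vendor's OCP accessors. Factored out as a sketch (the helper itself is hypothetical, not part of the driver):

    static void ocp_update_word(struct r8152 *tp, u16 type, u16 reg,
                                u32 clear, u32 set)
    {
            u32 data = ocp_read_word(tp, type, reg);

            data = (data & ~clear) | set;
            ocp_write_word(tp, type, reg, data);
    }

    /* e.g. the FC_PATCH_TASK toggle above would read:
     *   ocp_update_word(tp, MCU_TYPE_USB, USB_FW_TASK, FC_PATCH_TASK, 0);
     *   usleep_range(1000, 2000);
     *   ocp_update_word(tp, MCU_TYPE_USB, USB_FW_TASK, 0, FC_PATCH_TASK);
     */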
 
@@ -3376,8 +3403,8 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
                r8153b_ups_en(tp, false);
                r8153_queue_wake(tp, false);
                rtl_runtime_suspend_enable(tp, false);
-               r8153_u2p3en(tp, true);
-               r8153b_u1u2en(tp, true);
+               if (tp->udev->speed != USB_SPEED_HIGH)
+                       r8153b_u1u2en(tp, true);
        }
 }
 
@@ -4675,7 +4702,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
 
        r8153_aldps_en(tp, true);
        r8152b_enable_fc(tp);
-       r8153_u2p3en(tp, true);
 
        set_bit(PHY_RESET, &tp->flags);
 }
@@ -4954,6 +4980,8 @@ static void rtl8152_down(struct r8152 *tp)
 
 static void rtl8153_up(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
@@ -4961,6 +4989,19 @@ static void rtl8153_up(struct r8152 *tp)
        r8153_u2p3en(tp, false);
        r8153_aldps_en(tp, false);
        r8153_first_init(tp);
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+       ocp_data |= LANWAKE_CLR_EN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+       ocp_data &= ~LANWAKE_PIN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1);
+       ocp_data &= ~DELAY_PHY_PWR_CHG;
+       ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data);
+
        r8153_aldps_en(tp, true);
 
        switch (tp->version) {
@@ -4979,11 +5020,17 @@ static void rtl8153_up(struct r8152 *tp)
 
 static void rtl8153_down(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
 
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+       ocp_data &= ~LANWAKE_CLR_EN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
        r8153_u1u2en(tp, false);
        r8153_u2p3en(tp, false);
        r8153_power_cut_en(tp, false);
@@ -4994,6 +5041,8 @@ static void rtl8153_down(struct r8152 *tp)
 
 static void rtl8153b_up(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
@@ -5004,18 +5053,29 @@ static void rtl8153b_up(struct r8152 *tp)
        r8153_first_init(tp);
        ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+       ocp_data &= ~PLA_MCU_SPDWN_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
        r8153_aldps_en(tp, true);
-       r8153_u2p3en(tp, true);
-       r8153b_u1u2en(tp, true);
+
+       if (tp->udev->speed != USB_SPEED_HIGH)
+               r8153b_u1u2en(tp, true);
 }
 
 static void rtl8153b_down(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+       ocp_data |= PLA_MCU_SPDWN_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
        r8153b_u1u2en(tp, false);
        r8153_u2p3en(tp, false);
        r8153b_power_cut_en(tp, false);
@@ -5387,6 +5447,16 @@ static void r8153_init(struct r8152 *tp)
                else
                        ocp_data |= DYNAMIC_BURST;
                ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+
+               r8153_queue_wake(tp, false);
+
+               ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+               if (rtl8152_get_speed(tp) & LINK_STATUS)
+                       ocp_data |= CUR_LINK_OK;
+               else
+                       ocp_data &= ~CUR_LINK_OK;
+               ocp_data |= POLL_LINK_CHG;
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
        }
 
        ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -5416,10 +5486,19 @@ static void r8153_init(struct r8152 *tp)
        ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
 
        r8153_power_cut_en(tp, false);
+       rtl_runtime_suspend_enable(tp, false);
        r8153_u1u2en(tp, true);
        r8153_mac_clk_spd(tp, false);
        usb_enable_lpm(tp->udev);
 
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+       ocp_data |= LANWAKE_CLR_EN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+       ocp_data &= ~LANWAKE_PIN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
        /* rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
        ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
@@ -5484,7 +5563,17 @@ static void r8153b_init(struct r8152 *tp)
        r8153b_ups_en(tp, false);
        r8153_queue_wake(tp, false);
        rtl_runtime_suspend_enable(tp, false);
-       r8153b_u1u2en(tp, true);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+       if (rtl8152_get_speed(tp) & LINK_STATUS)
+               ocp_data |= CUR_LINK_OK;
+       else
+               ocp_data &= ~CUR_LINK_OK;
+       ocp_data |= POLL_LINK_CHG;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+       if (tp->udev->speed != USB_SPEED_HIGH)
+               r8153b_u1u2en(tp, true);
        usb_enable_lpm(tp->udev);
 
        /* MAC clock speed down */
@@ -5492,6 +5581,19 @@ static void r8153b_init(struct r8152 *tp)
        ocp_data |= MAC_CLK_SPDWN_EN;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+       ocp_data &= ~PLA_MCU_SPDWN_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+       if (tp->version == RTL_VER_09) {
+               /* Disable Test IO for 32QFN */
+               if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) {
+                       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+                       ocp_data |= TEST_IO_OFF;
+                       ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+               }
+       }
+
        set_bit(GREEN_ETHERNET, &tp->flags);
 
        /* rx aggregation */
@@ -6707,6 +6809,11 @@ static int rtl8152_probe(struct usb_interface *intf,
 
        intf->needs_remote_wakeup = 1;
 
+       if (!rtl_can_wakeup(tp))
+               __rtl_set_wol(tp, 0);
+       else
+               tp->saved_wolopts = __rtl_get_wol(tp);
+
        tp->rtl_ops.init(tp);
 #if IS_BUILTIN(CONFIG_USB_RTL8152)
        /* Retry in case request_firmware() is not ready yet. */
@@ -6724,10 +6831,6 @@ static int rtl8152_probe(struct usb_interface *intf,
                goto out1;
        }
 
-       if (!rtl_can_wakeup(tp))
-               __rtl_set_wol(tp, 0);
-
-       tp->saved_wolopts = __rtl_get_wol(tp);
        if (tp->saved_wolopts)
                device_set_wakeup_enable(&udev->dev, true);
        else
index f43c065..c4c8f1b 100644 (file)
@@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
        case AIROGVLIST:    ridcode = RID_APLIST;       break;
        case AIROGDRVNAM:   ridcode = RID_DRVNAME;      break;
        case AIROGEHTENC:   ridcode = RID_ETHERENCAP;   break;
-       case AIROGWEPKTMP:  ridcode = RID_WEP_TEMP;
-               /* Only super-user can read WEP keys */
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               break;
-       case AIROGWEPKNV:   ridcode = RID_WEP_PERM;
-               /* Only super-user can read WEP keys */
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               break;
+       case AIROGWEPKTMP:  ridcode = RID_WEP_TEMP;     break;
+       case AIROGWEPKNV:   ridcode = RID_WEP_PERM;     break;
        case AIROGSTAT:     ridcode = RID_STATUS;       break;
        case AIROGSTATSD32: ridcode = RID_STATSDELTA;   break;
        case AIROGSTATSC32: ridcode = RID_STATS;        break;
@@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
                return -EINVAL;
        }
 
-       if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+       if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
+               /* Only super-user can read WEP keys */
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
+       }
+
+       if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
                return -ENOMEM;
 
        PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
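Besides hoisting the CAP_NET_ADMIN check so it covers both WEP RIDs in one place, the allocation switches from kmalloc() to kzalloc(): the full RIDSIZE buffer is later copied to userspace, so any bytes PC4500_readrid() does not fill would otherwise leak stale heap contents. The general rule, sketched with hypothetical names (fill_partially, ubuf):

    iobuf = kzalloc(RIDSIZE, GFP_KERNEL);       /* zeroed, not kmalloc() */
    if (!iobuf)
            return -ENOMEM;
    fill_partially(iobuf);                      /* may write < RIDSIZE   */
    if (copy_to_user(ubuf, iobuf, RIDSIZE))     /* whole buffer goes out */
            rc = -EFAULT;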
index cd73fc5..fd45483 100644 (file)
@@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_station_priv *sta_priv = NULL;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        __le16 fc;
        u8 hdr_len;
@@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
        if (unlikely(!dev_cmd))
                goto drop_unlock_priv;
 
-       memset(dev_cmd, 0, sizeof(*dev_cmd));
        dev_cmd->hdr.cmd = REPLY_TX;
        tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
 
index 40fe2d6..48d375a 100644 (file)
@@ -357,8 +357,8 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
 {
        union acpi_object *wifi_pkg, *data;
        bool enabled;
-       int i, n_profiles, tbl_rev;
-       int  ret = 0;
+       int i, n_profiles, tbl_rev, pos;
+       int ret = 0;
 
        data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
        if (IS_ERR(data))
@@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
                goto out_free;
        }
 
-       for (i = 0; i < n_profiles; i++) {
-               /* the tables start at element 3 */
-               int pos = 3;
+       /* the tables start at element 3 */
+       pos = 3;
 
+       for (i = 0; i < n_profiles; i++) {
                /* The EWRD profiles officially go from 2 to 4, but we
                 * save them in sar_profiles[1-3] (because we don't
                 * have profile 0).  So in the array we start from 1.
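The real fix in this hunk is hoisting pos out of the loop: previously it was reset to 3 on every iteration, so each EWRD profile re-parsed the same ACPI elements; pos is presumably advanced as each value is consumed further down, outside this hunk, so parsing now walks the flattened package sequentially. In comment form, with hypothetical per-profile sizes:

    /* flattened ACPI package, say 10 values per profile:
     *   [0..2]   header / enabled / n_profiles
     *   [3..12]  profile 1
     *   [13..22] profile 2
     * old: every profile read elements [3..12]
     * new: pos carries across iterations, as intended
     */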
index ed90dd1..4c60f99 100644 (file)
@@ -2669,12 +2669,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
 {
        int ret = 0;
 
-       /* if the FW crashed or not debug monitor cfg was given, there is
-        * no point in changing the recording state
-        */
-       if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
-           (!fwrt->trans->dbg.dest_tlv &&
-            fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+       if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
                return 0;
 
        if (fw_has_capa(&fwrt->fw->ucode_capa,
index 92d9898..c2f7252 100644 (file)
@@ -379,7 +379,7 @@ enum {
 
 
 /* CSR GIO */
-#define CSR_GIO_REG_VAL_L0S_ENABLED    (0x00000002)
+#define CSR_GIO_REG_VAL_L0S_DISABLED   (0x00000002)
 
 /*
  * UCODE-DRIVER GP (general purpose) mailbox register 1
index f266647..ce8f248 100644 (file)
@@ -480,7 +480,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
        if (!frag || frag->size || !pages)
                return -EIO;
 
-       while (pages) {
+       /*
+        * We try to allocate as many pages as we can, starting with
+        * the requested amount and going down until we can allocate
+        * something.  Because of DIV_ROUND_UP(), pages will never go
+        * down to 0 and stop the loop, so stop when pages reaches 1,
+        * which is too small anyway.
+        */
+       while (pages > 1) {
                block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
                                           &physical,
                                           GFP_KERNEL | __GFP_NOWARN);
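The `pages > 1` guard matters because the failure path (below this excerpt) presumably shrinks the request with DIV_ROUND_UP(pages, 2), and DIV_ROUND_UP(1, 2) == 1 would spin forever. A self-contained sketch of the shrinking allocation, with placeholder variables:

    while (pages > 1) {
            block = dma_alloc_coherent(dev, pages * PAGE_SIZE, &phys,
                                       GFP_KERNEL | __GFP_NOWARN);
            if (block)
                    break;
            pages = DIV_ROUND_UP(pages, 2); /* shrinking stops at 1 */
    }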
index 4096ccf..bc8c959 100644 (file)
@@ -1817,9 +1817,6 @@ MODULE_PARM_DESC(antenna_coupling,
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
 MODULE_PARM_DESC(nvm_file, "NVM file name");
 
-module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
-MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
-
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
 MODULE_PARM_DESC(uapsd_disable,
                 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
index ebea3f3..82e5cac 100644 (file)
@@ -115,7 +115,6 @@ enum iwl_uapsd_disable {
  * @nvm_file: specifies a external NVM file
  * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
  *     IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @lar_disable: disable LAR (regulatory), default = 0
  * @fw_monitor: allow to use firmware monitor
  * @disable_11ac: disable VHT capabilities, default = false.
  * @remove_when_gone: remove an inaccessible device from the PCIe bus.
@@ -136,7 +135,6 @@ struct iwl_mod_params {
        int antenna_coupling;
        char *nvm_file;
        u32 uapsd_disable;
-       bool lar_disable;
        bool fw_monitor;
        bool disable_11ac;
        /**
index 1e240a2..d4f834b 100644 (file)
@@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags {
        NVM_CHANNEL_DC_HIGH             = BIT(12),
 };
 
+/**
+ * enum iwl_reg_capa_flags - global flags applied for the whole regulatory
+ * domain.
+ * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *     2.4GHz band is allowed.
+ * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *     5GHz band is allowed.
+ * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160MHz is allowed
+ *     for this regulatory domain (valid only in 5GHz).
+ * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80MHz is allowed
+ *     for this regulatory domain (valid only in 5GHz).
+ * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40MHz is forbidden
+ *     for this regulatory domain (valid only in 5GHz).
+ * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ */
+enum iwl_reg_capa_flags {
+       REG_CAPA_BF_CCD_LOW_BAND        = BIT(0),
+       REG_CAPA_BF_CCD_HIGH_BAND       = BIT(1),
+       REG_CAPA_160MHZ_ALLOWED         = BIT(2),
+       REG_CAPA_80MHZ_ALLOWED          = BIT(3),
+       REG_CAPA_MCS_8_ALLOWED          = BIT(4),
+       REG_CAPA_MCS_9_ALLOWED          = BIT(5),
+       REG_CAPA_40MHZ_FORBIDDEN        = BIT(7),
+       REG_CAPA_DC_HIGH_ENABLED        = BIT(9),
+};
+
 static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
                                               int chan, u32 flags)
 {
@@ -939,10 +967,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+                  const struct iwl_fw *fw,
                   const __be16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
-                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
+                  u8 tx_chains, u8 rx_chains)
 {
        struct iwl_nvm_data *data;
        bool lar_enabled;
@@ -1022,7 +1051,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                return NULL;
        }
 
-       if (lar_fw_supported && lar_enabled)
+       if (lar_enabled &&
+           fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
                sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
 
        if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))
@@ -1038,6 +1068,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
 
 static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
                                       int ch_idx, u16 nvm_flags,
+                                      u16 cap_flags,
                                       const struct iwl_cfg *cfg)
 {
        u32 flags = NL80211_RRF_NO_HT40;
@@ -1076,13 +1107,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
            (flags & NL80211_RRF_NO_IR))
                flags |= NL80211_RRF_GO_CONCURRENT;
 
+       /*
+        * cap_flags is per regulatory domain so apply it for every channel
+        */
+       if (ch_idx >= NUM_2GHZ_CHANNELS) {
+               if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+                       flags |= NL80211_RRF_NO_HT40;
+
+               if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+                       flags |= NL80211_RRF_NO_80MHZ;
+
+               if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+                       flags |= NL80211_RRF_NO_160MHZ;
+       }
+
        return flags;
 }
 
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                       int num_of_ch, __le32 *channels, u16 fw_mcc,
-                      u16 geo_info)
+                      u16 geo_info, u16 cap)
 {
        int ch_idx;
        u16 ch_flags;
@@ -1140,7 +1185,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                }
 
                reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
-                                                            ch_flags, cfg);
+                                                            ch_flags, cap,
+                                                            cfg);
 
                /* we can't continue the same rule */
                if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -1405,9 +1451,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
                .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
        };
        int  ret;
-       bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
-                               fw_has_capa(&fw->ucode_capa,
-                                           IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
        bool empty_otp;
        u32 mac_flags;
        u32 sbands_flags = 0;
@@ -1485,7 +1528,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
        nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
        nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
 
-       if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+       if (le32_to_cpu(rsp->regulatory.lar_enabled) &&
+           fw_has_capa(&fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) {
                nvm->lar_enabled = true;
                sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
        }
index b7e1ddf..fb0b385 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018        Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018        Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags {
  */
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+                  const struct iwl_fw *fw,
                   const __be16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
-                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
+                  u8 tx_chains, u8 rx_chains);
 
 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
@@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                       int num_of_ch, __le32 *channels, u16 fw_mcc,
-                      u16 geo_info);
+                      u16 geo_info, u16 cap);
 
 /**
  * struct iwl_nvm_section - describes an NVM section in memory.
index 28bdc9a..f91197e 100644 (file)
@@ -66,7 +66,9 @@
 
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                                  struct device *dev,
-                                 const struct iwl_trans_ops *ops)
+                                 const struct iwl_trans_ops *ops,
+                                 unsigned int cmd_pool_size,
+                                 unsigned int cmd_pool_align)
 {
        struct iwl_trans *trans;
 #ifdef CONFIG_LOCKDEP
@@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                 "iwl_cmd_pool:%s", dev_name(trans->dev));
        trans->dev_cmd_pool =
                kmem_cache_create(trans->dev_cmd_pool_name,
-                                 sizeof(struct iwl_device_cmd),
-                                 sizeof(void *),
-                                 SLAB_HWCACHE_ALIGN,
-                                 NULL);
+                                 cmd_pool_size, cmd_pool_align,
+                                 SLAB_HWCACHE_ALIGN, NULL);
        if (!trans->dev_cmd_pool)
                return NULL;
 
index 8cadad7..e33df5a 100644 (file)
@@ -193,6 +193,18 @@ struct iwl_device_cmd {
        };
 } __packed;
 
+/**
+ * struct iwl_device_tx_cmd - buffer for TX command
+ * @hdr: the header
+ * @payload: the payload placeholder
+ *
+ * The actual structure is sized dynamically according to need.
+ */
+struct iwl_device_tx_cmd {
+       struct iwl_cmd_header hdr;
+       u8 payload[];
+} __packed;
+
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
 
 /*
@@ -544,7 +556,7 @@ struct iwl_trans_ops {
        int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
        int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
-                 struct iwl_device_cmd *dev_cmd, int queue);
+                 struct iwl_device_tx_cmd *dev_cmd, int queue);
        void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
                        struct sk_buff_head *skbs);
 
@@ -948,22 +960,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
        return trans->ops->dump_data(trans, dump_mask);
 }
 
-static inline struct iwl_device_cmd *
+static inline struct iwl_device_tx_cmd *
 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 {
-       return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+       return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
 }
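The switch to kmem_cache_zalloc() pairs with the caller-supplied pool size and alignment in iwl_trans_alloc() above, and is why the explicit memset() of dev_cmd disappeared from iwlagn_tx_skb() earlier in this diff: every allocation now comes back zeroed. Sketch of the pairing (sizes illustrative):

    pool = kmem_cache_create("cmd_pool", cmd_size, cmd_align,
                             SLAB_HWCACHE_ALIGN, NULL);
    cmd = kmem_cache_zalloc(pool, GFP_ATOMIC);  /* zeroed, no memset */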
 
 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
-                                        struct iwl_device_cmd *dev_cmd)
+                                        struct iwl_device_tx_cmd *dev_cmd)
 {
        kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
 }
 
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                              struct iwl_device_cmd *dev_cmd, int queue)
+                              struct iwl_device_tx_cmd *dev_cmd, int queue)
 {
        if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
                return -EIO;
@@ -1271,7 +1283,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
  *****************************************************/
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                                  struct device *dev,
-                                 const struct iwl_trans_ops *ops);
+                                 const struct iwl_trans_ops *ops,
+                                 unsigned int cmd_pool_size,
+                                 unsigned int cmd_pool_align);
 void iwl_trans_free(struct iwl_trans *trans);
 
 /*****************************************************
index 60aff2e..58df25e 100644 (file)
 #define IWL_MVM_D3_DEBUG                       false
 #define IWL_MVM_USE_TWT                                false
 #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA       10
+#define IWL_MVM_USE_NSSN_SYNC                  0
 
 #endif /* __MVM_CONSTANTS_H */
index dd685f7..c09624d 100644 (file)
@@ -841,9 +841,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
                return 0;
        }
 
+       if (!mvm->fwrt.ppag_table.enabled) {
+               IWL_DEBUG_RADIO(mvm,
+                               "PPAG not enabled, command not sent.\n");
+               return 0;
+       }
+
        IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
-       IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
-                       mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
 
        for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
                for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
index 32dc9d6..6717f25 100644 (file)
@@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
                                      __le32_to_cpu(resp->n_channels),
                                      resp->channels,
                                      __le16_to_cpu(resp->mcc),
-                                     __le16_to_cpu(resp->geo_info));
+                                     __le16_to_cpu(resp->geo_info),
+                                     __le16_to_cpu(resp->cap));
        /* Store the return source id */
        src_id = resp->source_id;
        kfree(resp);
@@ -754,6 +755,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        return ret;
 }
 
+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+                          struct ieee80211_sta *sta)
+{
+       if (likely(sta)) {
+               if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
+                       return;
+       } else {
+               if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
+                       return;
+       }
+
+       ieee80211_free_txskb(mvm->hw, skb);
+}
+
 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
                           struct ieee80211_tx_control *control,
                           struct sk_buff *skb)
@@ -797,14 +812,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
                }
        }
 
-       if (sta) {
-               if (iwl_mvm_tx_skb(mvm, skb, sta))
-                       goto drop;
-               return;
-       }
-
-       if (iwl_mvm_tx_skb_non_sta(mvm, skb))
-               goto drop;
+       iwl_mvm_tx_skb(mvm, skb, sta);
        return;
  drop:
        ieee80211_free_txskb(hw, skb);
@@ -854,10 +862,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
                                break;
                        }
 
-                       if (!txq->sta)
-                               iwl_mvm_tx_skb_non_sta(mvm, skb);
-                       else
-                               iwl_mvm_tx_skb(mvm, skb, txq->sta);
+                       iwl_mvm_tx_skb(mvm, skb, txq->sta);
                }
        } while (atomic_dec_return(&mvmtxq->tx_request));
        rcu_read_unlock();
@@ -4771,6 +4776,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
        return ret;
 }
 
+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+       switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+       case RATE_MCS_CHAN_WIDTH_20:
+               rinfo->bw = RATE_INFO_BW_20;
+               break;
+       case RATE_MCS_CHAN_WIDTH_40:
+               rinfo->bw = RATE_INFO_BW_40;
+               break;
+       case RATE_MCS_CHAN_WIDTH_80:
+               rinfo->bw = RATE_INFO_BW_80;
+               break;
+       case RATE_MCS_CHAN_WIDTH_160:
+               rinfo->bw = RATE_INFO_BW_160;
+               break;
+       }
+
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               rinfo->flags |= RATE_INFO_FLAGS_MCS;
+               rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
+               rinfo->nss = u32_get_bits(rate_n_flags,
+                                         RATE_HT_MCS_NSS_MSK) + 1;
+               if (rate_n_flags & RATE_MCS_SGI_MSK)
+                       rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+       } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+               rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+               rinfo->mcs = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_RATE_CODE_MSK);
+               rinfo->nss = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_NSS_MSK) + 1;
+               if (rate_n_flags & RATE_MCS_SGI_MSK)
+                       rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+       } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+               u32 gi_ltf = u32_get_bits(rate_n_flags,
+                                         RATE_MCS_HE_GI_LTF_MSK);
+
+               rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+               rinfo->mcs = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_RATE_CODE_MSK);
+               rinfo->nss = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_NSS_MSK) + 1;
+
+               if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+                       rinfo->bw = RATE_INFO_BW_HE_RU;
+                       rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+               }
+
+               switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+               case RATE_MCS_HE_TYPE_SU:
+               case RATE_MCS_HE_TYPE_EXT_SU:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else if (gi_ltf == 2)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else if (rate_n_flags & RATE_MCS_SGI_MSK)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               case RATE_MCS_HE_TYPE_MU:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else if (gi_ltf == 2)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               case RATE_MCS_HE_TYPE_TRIG:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               }
+
+               if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+                       rinfo->he_dcm = 1;
+       } else {
+               switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
+               case IWL_RATE_1M_PLCP:
+                       rinfo->legacy = 10;
+                       break;
+               case IWL_RATE_2M_PLCP:
+                       rinfo->legacy = 20;
+                       break;
+               case IWL_RATE_5M_PLCP:
+                       rinfo->legacy = 55;
+                       break;
+               case IWL_RATE_11M_PLCP:
+                       rinfo->legacy = 110;
+                       break;
+               case IWL_RATE_6M_PLCP:
+                       rinfo->legacy = 60;
+                       break;
+               case IWL_RATE_9M_PLCP:
+                       rinfo->legacy = 90;
+                       break;
+               case IWL_RATE_12M_PLCP:
+                       rinfo->legacy = 120;
+                       break;
+               case IWL_RATE_18M_PLCP:
+                       rinfo->legacy = 180;
+                       break;
+               case IWL_RATE_24M_PLCP:
+                       rinfo->legacy = 240;
+                       break;
+               case IWL_RATE_36M_PLCP:
+                       rinfo->legacy = 360;
+                       break;
+               case IWL_RATE_48M_PLCP:
+                       rinfo->legacy = 480;
+                       break;
+               case IWL_RATE_54M_PLCP:
+                       rinfo->legacy = 540;
+                       break;
+               }
+       }
+}
+
 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
                                       struct ieee80211_sta *sta,
@@ -4785,6 +4909,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
                sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
        }
 
+       if (iwl_mvm_has_tlc_offload(mvm)) {
+               struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+               iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+       }
+
        /* if beacon filtering isn't on mac80211 does it anyway */
        if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
                return;
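
One unit detail in iwl_mvm_set_sta_rate() above: struct rate_info's legacy
field is expressed in units of 100 kbit/s, which is why IWL_RATE_1M_PLCP
maps to 10 and IWL_RATE_54M_PLCP to 540. A tiny illustrative conversion,
not part of the patch:

	#include <net/cfg80211.h>

	/* rate_info.legacy is in 100 kbit/s units */
	static u32 legacy_rate_kbps(const struct rate_info *rinfo)
	{
		return rinfo->legacy * 100;	/* e.g. 540 -> 54000 kbit/s */
	}
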
index 3ec8de0..67ab7e7 100644 (file)
@@ -1298,9 +1298,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
        bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
                                   IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
-       if (iwlwifi_mod_params.lar_disable)
-               return false;
-
        /*
         * Enable LAR only if it is supported by the FW (TLV) &&
         * enabled in the NVM
@@ -1508,8 +1505,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
 int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
                                             u16 len, const void *data,
                                             u32 *status);
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-                  struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+                      struct ieee80211_sta *sta);
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        struct iwl_tx_cmd *tx_cmd,
index 945c1ea..46128a2 100644 (file)
@@ -277,11 +277,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        struct iwl_nvm_section *sections = mvm->nvm_sections;
        const __be16 *hw;
        const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
-       bool lar_enabled;
        int regulatory_type;
 
        /* Checking for required sections */
-       if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+       if (mvm->trans->cfg->nvm_type == IWL_NVM) {
                if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
                    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
                        IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
@@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
                (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
 
-       lar_enabled = !iwlwifi_mod_params.lar_disable &&
-                     fw_has_capa(&mvm->fw->ucode_capa,
-                                 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-
-       return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+       return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
                                  regulatory, mac_override, phy_sku,
-                                 mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
-                                 lar_enabled);
+                                 mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
 }
 
 /* Loads the NVM data stored in mvm->nvm_sections into the NIC */
index ef99c49..c15f7db 100644 (file)
@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
 
 static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
 {
-       struct iwl_mvm_rss_sync_notif notif = {
-               .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
-               .metadata.sync = 0,
-               .nssn_sync.baid = baid,
-               .nssn_sync.nssn = nssn,
-       };
-
-       iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+       if (IWL_MVM_USE_NSSN_SYNC) {
+               struct iwl_mvm_rss_sync_notif notif = {
+                       .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+                       .metadata.sync = 0,
+                       .nssn_sync.baid = baid,
+                       .nssn_sync.nssn = nssn,
+               };
+
+               iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
+                                               sizeof(notif));
+       }
 }
 
 #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
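
Note the pattern: instead of an #ifdef, the code now gates on the
IWL_MVM_USE_NSSN_SYNC constant added to constants.h (0 by default), so the
disabled branch is still compiled and type-checked but discarded as dead
code. Schematically, with hypothetical names:

	#define EXAMPLE_FEATURE 0

	static void do_sync(void) { /* hypothetical work */ }

	static void maybe_sync(void)
	{
		if (EXAMPLE_FEATURE) {
			/* parsed and type-checked even when disabled,
			 * then eliminated as dead code by the compiler
			 */
			do_sync();
		}
	}
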
index a046ac9..a5af8f4 100644 (file)
@@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
                cmd_size = sizeof(struct iwl_scan_config_v2);
        else
                cmd_size = sizeof(struct iwl_scan_config_v1);
-       cmd_size += num_channels;
+       cmd_size += mvm->fw->ucode_capa.n_scan_channels;
 
        cfg = kzalloc(cmd_size, GFP_KERNEL);
        if (!cfg)
index dc5c02f..ddfc9a6 100644 (file)
@@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
 /*
  * Allocates and sets the Tx cmd and the driver data pointers in the skb
  */
-static struct iwl_device_cmd *
+static struct iwl_device_tx_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
                      struct ieee80211_tx_info *info, int hdrlen,
                      struct ieee80211_sta *sta, u8 sta_id)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
 
        dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@@ -504,11 +504,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (unlikely(!dev_cmd))
                return NULL;
 
-       /* Make sure we zero enough of dev_cmd */
-       BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
-       BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
-
-       memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
        dev_cmd->hdr.cmd = TX_CMD;
 
        if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -597,7 +592,7 @@ out:
 }
 
 static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
-                                      struct iwl_device_cmd *cmd)
+                                      struct iwl_device_tx_cmd *cmd)
 {
        struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 
@@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info info;
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        u8 sta_id;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
        __le16 fc = hdr->frame_control;
@@ -1078,7 +1073,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct iwl_mvm_sta *mvmsta;
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        __le16 fc;
        u16 seq_number = 0;
        u8 tid = IWL_MAX_TID_COUNT;
@@ -1154,7 +1149,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
                spin_unlock(&mvmsta->lock);
-               return 0;
+               return -1;
        }
 
        if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -1206,8 +1201,8 @@ drop:
        return -1;
 }
 
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-                  struct ieee80211_sta *sta)
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+                      struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct ieee80211_tx_info info;
index d38cefb..e249e3f 100644 (file)
 #include "internal.h"
 #include "iwl-prph.h"
 
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+                                                   size_t size,
+                                                   dma_addr_t *phys,
+                                                   int depth)
+{
+       void *result;
+
+       if (WARN(depth > 2,
+                "failed to allocate DMA memory not crossing 2^32 boundary"))
+               return NULL;
+
+       result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+       if (!result)
+               return NULL;
+
+       if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+               void *old = result;
+               dma_addr_t oldphys = *phys;
+
+               result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+                                                               phys,
+                                                               depth + 1);
+               dma_free_coherent(trans->dev, size, old, oldphys);
+       }
+
+       return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+                                                  size_t size,
+                                                  dma_addr_t *phys)
+{
+       return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
 void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
 {
        struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
        struct iwl_context_info *ctxt_info;
        struct iwl_context_info_rbd_cfg *rx_cfg;
        u32 control_flags = 0, rb_size;
+       dma_addr_t phys;
        int ret;
 
-       ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
-                                      &trans_pcie->ctxt_info_dma_addr,
-                                      GFP_KERNEL);
+       ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+                                                         sizeof(*ctxt_info),
+                                                         &phys);
        if (!ctxt_info)
                return -ENOMEM;
 
+       trans_pcie->ctxt_info_dma_addr = phys;
+
        ctxt_info->version.version = 0;
        ctxt_info->version.mac_id =
                cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
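
The helper above retries while the offending buffer is still held, freeing
it only after the replacement attempt so dma_alloc_coherent() cannot hand
back the same crossing region, and the depth check bounds the retries. A
worked example of the predicate it relies on, with illustrative values:

	/* a 16 KiB buffer starting 4 KiB below the 4 GiB line crosses it: */
	u64 phys = 0xfffff000ULL;	/* upper_32_bits(phys) == 0x0 */
	u64 end  = phys + 0x4000;	/* 0x100003000, upper 32 bits == 0x1 */
	/* upper_32_bits() of start and end differ: the buffer crosses 2^32 */
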
index a091690..f14bcef 100644 (file)
@@ -305,7 +305,7 @@ struct iwl_cmd_meta {
 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
 
 struct iwl_pcie_txq_entry {
-       struct iwl_device_cmd *cmd;
+       void *cmd;
        struct sk_buff *skb;
        /* buffer to free after command completes */
        const void *free_buf;
@@ -672,6 +672,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
 /*****************************************************
 * TX / HCMD
 ******************************************************/
+/*
+ * We need this helper even when dma_addr_t is only 32 bits wide: the
+ * hardware always uses 64-bit addresses, so a crossing can still occur,
+ * and taking 'phys' as u64 forces the addition to be done in 64 bits.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+       return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
 int iwl_pcie_tx_init(struct iwl_trans *trans);
 int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
                          int queue_size);
@@ -688,7 +698,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
 void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
                                  struct iwl_txq *txq);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                     struct iwl_device_cmd *dev_cmd, int txq_id);
+                     struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
@@ -1082,7 +1092,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans);
 void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
                            struct sk_buff *skb);
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+                                     struct sk_buff *skb);
 #endif
 
 /* common functions that are used by gen3 transport */
@@ -1106,7 +1117,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                                 unsigned int timeout);
 void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                          struct iwl_device_cmd *dev_cmd, int txq_id);
+                          struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
                                  struct iwl_host_cmd *cmd);
 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
index 452da44..f0b8ff6 100644 (file)
@@ -1529,13 +1529,13 @@ out:
 
        napi = &rxq->napi;
        if (napi->poll) {
+               napi_gro_flush(napi, false);
+
                if (napi->rx_count) {
                        netif_receive_skb_list(&napi->rx_list);
                        INIT_LIST_HEAD(&napi->rx_list);
                        napi->rx_count = 0;
                }
-
-               napi_gro_flush(napi, false);
        }
 
        iwl_pcie_rxq_restock(trans, rxq);
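
The swap above likely matters because, with GRO normal-list batching,
napi_gro_flush() can itself append packets to napi->rx_list via the
gro_normal path; flushing only after the list had been delivered (the old
order) could strand those packets until the next NAPI poll.
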
index a067713..f60d66f 100644 (file)
@@ -79,6 +79,7 @@
 #include "iwl-agn-hw.h"
 #include "fw/error-dump.h"
 #include "fw/dbg.h"
+#include "fw/api/tx.h"
 #include "internal.h"
 #include "iwl-fh.h"
 
@@ -301,18 +302,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
        u16 cap;
 
        /*
-        * HW bug W/A for instability in PCIe bus L0S->L1 transition.
-        * Check if BIOS (or OS) enabled L1-ASPM on this device.
-        * If so (likely), disable L0S, so device moves directly L0->L1;
-        *    costs negligible amount of power savings.
-        * If not (unlikely), enable L0S, so there is at least some
-        *    power savings, even without L1.
+        * L0S states have been found to be unstable with our devices
+        * and in newer hardware they are not officially supported at
+        * all, so we must always set the L0S_DISABLED bit.
         */
+       iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
-       if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
-               iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-       else
-               iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
 
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
@@ -3460,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 {
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
-       int ret, addr_size;
+       int ret, addr_size, txcmd_size, txcmd_align;
+       const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+       if (!cfg_trans->gen2) {
+               ops = &trans_ops_pcie;
+               txcmd_size = sizeof(struct iwl_tx_cmd);
+               txcmd_align = sizeof(void *);
+       } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+               txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+               txcmd_align = 64;
+       } else {
+               txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+               txcmd_align = 128;
+       }
+
+       txcmd_size += sizeof(struct iwl_cmd_header);
+       txcmd_size += 36; /* biggest possible 802.11 header */
+
+       /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+       if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+               return ERR_PTR(-EINVAL);
 
        ret = pcim_enable_device(pdev);
        if (ret)
                return ERR_PTR(ret);
 
-       if (cfg_trans->gen2)
-               trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-                                       &pdev->dev, &trans_ops_pcie_gen2);
-       else
-               trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-                                       &pdev->dev, &trans_ops_pcie);
-
+       trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+                               txcmd_size, txcmd_align);
        if (!trans)
                return ERR_PTR(-ENOMEM);
 
index 8ca0250..bfb984b 100644 (file)
@@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
        int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
        struct iwl_tfh_tb *tb;
 
+       /*
+        * Only WARN here to flag the issue: returning an error would break
+        * our unmap path, since not every caller currently checks this
+        * function's return value.  It can only fail when there is no more
+        * TB space, so callers that know enough space remains don't always
+        * check.
+        */
+       WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+            "possible DMA problem with iova:0x%llx, len:%d\n",
+            (unsigned long long)addr, len);
+
        if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
                return -EINVAL;
        tb = &tfd->tbs[idx];
@@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
        return idx;
 }
 
+static struct page *get_workaround_page(struct iwl_trans *trans,
+                                       struct sk_buff *skb)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct page **page_ptr;
+       struct page *ret;
+
+       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+       ret = alloc_page(GFP_ATOMIC);
+       if (!ret)
+               return NULL;
+
+       /* set the chaining pointer to the previous page if there */
+       *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+       *page_ptr = ret;
+
+       return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+                                       struct sk_buff *skb,
+                                       struct iwl_tfh_tfd *tfd,
+                                       dma_addr_t phys, void *virt,
+                                       u16 len, struct iwl_cmd_meta *meta)
+{
+       dma_addr_t oldphys = phys;
+       struct page *page;
+       int ret;
+
+       if (unlikely(dma_mapping_error(trans->dev, phys)))
+               return -ENOMEM;
+
+       if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+               ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+               if (ret < 0)
+                       goto unmap;
+
+               if (meta)
+                       meta->tbs |= BIT(ret);
+
+               ret = 0;
+               goto trace;
+       }
+
+       /*
+        * Work around a hardware bug. If (as expressed in the
+        * condition above) the TB crosses a 2^32 address boundary,
+        * then the device may access the next TB with the wrong
+        * address.
+        * To work around it, copy the data elsewhere and make
+        * a new mapping for it so the device will not fail.
+        */
+
+       if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+               ret = -ENOBUFS;
+               goto unmap;
+       }
+
+       page = get_workaround_page(trans, skb);
+       if (!page) {
+               ret = -ENOMEM;
+               goto unmap;
+       }
+
+       memcpy(page_address(page), virt, len);
+
+       phys = dma_map_single(trans->dev, page_address(page), len,
+                             DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(trans->dev, phys)))
+               return -ENOMEM;
+       ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+       if (ret < 0) {
+               /* unmap the new allocation as single */
+               oldphys = phys;
+               meta = NULL;
+               goto unmap;
+       }
+       IWL_WARN(trans,
+                "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+                len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+       ret = 0;
+unmap:
+       if (meta)
+               dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+       else
+               dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+       trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+       return ret;
+}
+
 static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                                     struct sk_buff *skb,
                                     struct iwl_tfh_tfd *tfd, int start_len,
-                                    u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+                                    u8 hdr_len,
+                                    struct iwl_device_tx_cmd *dev_cmd)
 {
 #ifdef CONFIG_INET
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
        struct ieee80211_hdr *hdr = (void *)skb->data;
        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
        u16 length, amsdu_pad;
        u8 *start_hdr;
        struct iwl_tso_hdr_page *hdr_page;
-       struct page **page_ptr;
        struct tso_t tso;
 
        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
@@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
 
        /* Our device supports 9 segments at most, so it will fit in 1 page */
-       hdr_page = get_page_hdr(trans, hdr_room);
+       hdr_page = get_page_hdr(trans, hdr_room, skb);
        if (!hdr_page)
                return -ENOMEM;
 
-       get_page(hdr_page->page);
        start_hdr = hdr_page->pos;
-       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-       *page_ptr = hdr_page->page;
 
        /*
         * Pull the ieee80211 header to be able to use TSO core,
@@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                        dev_kfree_skb(csum_skb);
                        goto out_err;
                }
+               /*
+                * No need for _with_wa, this is from the TSO page and
+                * we leave some space at the end of it so can't hit
+                * the buggy scenario.
+                */
                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
                trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
                                        tb_phys, tb_len);
@@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 
                /* put the payload */
                while (data_left) {
+                       int ret;
+
                        tb_len = min_t(unsigned int, tso.size, data_left);
                        tb_phys = dma_map_single(trans->dev, tso.data,
                                                 tb_len, DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+                       ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+                                                          tb_phys, tso.data,
+                                                          tb_len, NULL);
+                       if (ret) {
                                dev_kfree_skb(csum_skb);
                                goto out_err;
                        }
-                       iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
-                       trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
-                                               tb_phys, tb_len);
 
                        data_left -= tb_len;
                        tso_build_data(skb, &tso, tb_len);
@@ -372,7 +487,7 @@ out_err:
 static struct
 iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
                                          struct iwl_txq *txq,
-                                         struct iwl_device_cmd *dev_cmd,
+                                         struct iwl_device_tx_cmd *dev_cmd,
                                          struct sk_buff *skb,
                                          struct iwl_cmd_meta *out_meta,
                                          int hdr_len,
@@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
 
        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 
+       /*
+        * No need for _with_wa, the first TB allocation is aligned up
+        * to a 64-byte boundary and thus can't be at the end or cross
+        * a page boundary (much less a 2^32 boundary).
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
        /*
@@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
        tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
+       /*
+        * No need for _with_wa(), we ensure (via alignment) that the data
+        * here can never cross or end at a page boundary.
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
 
        if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
@@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                dma_addr_t tb_phys;
-               int tb_idx;
+               unsigned int fragsz = skb_frag_size(frag);
+               int ret;
 
-               if (!skb_frag_size(frag))
+               if (!fragsz)
                        continue;
 
                tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
-                                          skb_frag_size(frag), DMA_TO_DEVICE);
-
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-                       return -ENOMEM;
-               tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
-                                             skb_frag_size(frag));
-               trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
-                                       tb_phys, skb_frag_size(frag));
-               if (tb_idx < 0)
-                       return tb_idx;
-
-               out_meta->tbs |= BIT(tb_idx);
+                                          fragsz, DMA_TO_DEVICE);
+               ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+                                                  skb_frag_address(frag),
+                                                  fragsz, out_meta);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
 static struct
 iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
                                    struct iwl_txq *txq,
-                                   struct iwl_device_cmd *dev_cmd,
+                                   struct iwl_device_tx_cmd *dev_cmd,
                                    struct sk_buff *skb,
                                    struct iwl_cmd_meta *out_meta,
                                    int hdr_len,
@@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
        /* The first TB points to bi-directional DMA data */
        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
 
+       /*
+        * No need for _with_wa, the first TB allocation is aligned up
+        * to a 64-byte boundary and thus can't be at the end or cross
+        * a page boundary (much less a 2^32 boundary).
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
        /*
@@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
        tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
+       /*
+        * No need for _with_wa(), we ensure (via alignment) that the data
+        * here can never cross or end at a page boundary.
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
                             IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
        tb2_len = skb_headlen(skb) - hdr_len;
 
        if (tb2_len > 0) {
+               int ret;
+
                tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
                                         tb2_len, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+               ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+                                                  skb->data + hdr_len, tb2_len,
+                                                  NULL);
+               if (ret)
                        goto out_err;
-               iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
-               trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
-                                       tb_phys, tb2_len);
        }
 
        if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
                goto out_err;
 
        skb_walk_frags(skb, frag) {
+               int ret;
+
                tb_phys = dma_map_single(trans->dev, frag->data,
                                         skb_headlen(frag), DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+               ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+                                                  frag->data,
+                                                  skb_headlen(frag), NULL);
+               if (ret)
                        goto out_err;
-               iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
-               trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
-                                       tb_phys, skb_headlen(frag));
                if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
                        goto out_err;
        }
@@ -538,7 +670,7 @@ out_err:
 static
 struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
                                            struct iwl_txq *txq,
-                                           struct iwl_device_cmd *dev_cmd,
+                                           struct iwl_device_tx_cmd *dev_cmd,
                                            struct sk_buff *skb,
                                            struct iwl_cmd_meta *out_meta)
 {
@@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 }
 
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                          struct iwl_device_cmd *dev_cmd, int txq_id)
+                          struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_cmd_meta *out_meta;
@@ -603,7 +735,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
                /* don't put the packet on the ring, if there is no room */
                if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-                       struct iwl_device_cmd **dev_cmd_ptr;
+                       struct iwl_device_tx_cmd **dev_cmd_ptr;
 
                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
                                               trans_pcie->dev_cmd_offs);
index f21f16a..b0eb52b 100644 (file)
@@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
        u8 sec_ctl = 0;
        u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
        __le16 bc_ent;
-       struct iwl_tx_cmd *tx_cmd =
-               (void *)txq->entries[txq->write_ptr].cmd->payload;
+       struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+       struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
        u8 sta_id = tx_cmd->sta_id;
 
        scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
        int read_ptr = txq->read_ptr;
        u8 sta_id = 0;
        __le16 bc_ent;
-       struct iwl_tx_cmd *tx_cmd =
-               (void *)txq->entries[read_ptr].cmd->payload;
+       struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+       struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 
        WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
                            struct sk_buff *skb)
 {
        struct page **page_ptr;
+       struct page *next;
 
        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+       next = *page_ptr;
+       *page_ptr = NULL;
 
-       if (*page_ptr) {
-               __free_page(*page_ptr);
-               *page_ptr = NULL;
+       while (next) {
+               struct page *tmp = next;
+
+               next = *(void **)(page_address(next) + PAGE_SIZE -
+                                 sizeof(void *));
+               __free_page(tmp);
        }
 }
 
@@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
                while (!skb_queue_empty(&overflow_skbs)) {
                        struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
-                       struct iwl_device_cmd *dev_cmd_ptr;
+                       struct iwl_device_tx_cmd *dev_cmd_ptr;
 
                        dev_cmd_ptr = *(void **)((u8 *)skb->cb +
                                                 trans_pcie->dev_cmd_offs);
@@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 }
 
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+                                     struct sk_buff *skb)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+       struct page **page_ptr;
+
+       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+       if (WARN_ON(*page_ptr))
+               return NULL;
 
        if (!p->page)
                goto alloc;
 
-       /* enough room on this page */
-       if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
-               return p;
+       /*
+        * Check if there's enough room on this page
+        *
+        * Note that we put a page chaining pointer *last* in the
+        * page - we need it somewhere, and if it's there then we
+        * avoid DMA mapping the last bits of the page which may
+        * trigger the 32-bit boundary hardware bug.
+        *
+        * (see also get_workaround_page() in tx-gen2.c)
+        */
+       if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+                          sizeof(void *))
+               goto out;
 
        /* We don't have enough room on this page, get a new one. */
        __free_page(p->page);
@@ -2072,6 +2095,11 @@ alloc:
        if (!p->page)
                return NULL;
        p->pos = page_address(p->page);
+       /* set the chaining pointer to NULL */
+       *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+       *page_ptr = p->page;
+       get_page(p->page);
        return p;
 }
 
@@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                   struct iwl_txq *txq, u8 hdr_len,
                                   struct iwl_cmd_meta *out_meta,
-                                  struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+                                  struct iwl_device_tx_cmd *dev_cmd,
+                                  u16 tb1_len)
 {
        struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
        struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
        u16 length, iv_len, amsdu_pad;
        u8 *start_hdr;
        struct iwl_tso_hdr_page *hdr_page;
-       struct page **page_ptr;
        struct tso_t tso;
 
        /* if the packet is protected, then it must be CCMP or GCMP */
@@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
 
        /* Our device supports 9 segments at most, so it will fit in 1 page */
-       hdr_page = get_page_hdr(trans, hdr_room);
+       hdr_page = get_page_hdr(trans, hdr_room, skb);
        if (!hdr_page)
                return -ENOMEM;
 
-       get_page(hdr_page->page);
        start_hdr = hdr_page->pos;
-       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-       *page_ptr = hdr_page->page;
        memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
        hdr_page->pos += iv_len;
 
@@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                   struct iwl_txq *txq, u8 hdr_len,
                                   struct iwl_cmd_meta *out_meta,
-                                  struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+                                  struct iwl_device_tx_cmd *dev_cmd,
+                                  u16 tb1_len)
 {
        /* No A-MSDU without CONFIG_INET */
        WARN_ON(1);
@@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 #endif /* CONFIG_INET */
 
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                     struct iwl_device_cmd *dev_cmd, int txq_id)
+                     struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct ieee80211_hdr *hdr;
@@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
                /* don't put the packet on the ring, if there is no room */
                if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-                       struct iwl_device_cmd **dev_cmd_ptr;
+                       struct iwl_device_tx_cmd **dev_cmd_ptr;
 
                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
                                               trans_pcie->dev_cmd_offs);
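
The TSO-page changes in this file pair with get_workaround_page() in
tx-gen2.c above: every page handed out now carries a chaining pointer in
its final sizeof(void *) bytes, which lets iwl_pcie_free_tso_page() walk
and free the whole chain and also keeps the tail of the page out of any
DMA mapping, so a transfer buffer can never end exactly at the page
boundary. The layout, schematically:

	/*
	 * page_address(p)                                  + PAGE_SIZE
	 * |  TSO headers / copied payload ...   | chain ptr |
	 *                                         ^ last sizeof(void *) bytes:
	 *                                           previously allocated page
	 *                                           (or NULL), never DMA-mapped
	 */
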
index 57edfad..c9401c1 100644 (file)
@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
        int hw, ap, ap_max = ie[1];
        u8 hw_rate;
 
+       if (ap_max > MAX_RATES) {
+               lbs_deb_assoc("invalid rates\n");
+               return tlv;
+       }
        /* Advance past IE header */
        ie += 2;
 
@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
        struct cmd_ds_802_11_ad_hoc_join cmd;
        u8 preamble = RADIO_PREAMBLE_SHORT;
        int ret = 0;
+       int hw, i;
+       u8 rates_max;
+       u8 *rates;
 
        /* TODO: set preamble based on scan result */
        ret = lbs_set_radio(priv, preamble, 1);
@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
        if (!rates_eid) {
                lbs_add_rates(cmd.bss.rates);
        } else {
-               int hw, i;
-               u8 rates_max = rates_eid[1];
-               u8 *rates = cmd.bss.rates;
+               rates_max = rates_eid[1];
+               if (rates_max > MAX_RATES) {
+                       lbs_deb_join("invalid rates");
+                       goto out;
+               }
+               rates = cmd.bss.rates;
                for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
                        u8 hw_rate = lbs_rates[hw].bitrate / 5;
                        for (i = 0; i < rates_max; i++) {
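
Both checks added above bound an IE-supplied rate count to MAX_RATES before
it is used to walk or fill fixed-size rate arrays (cmd.bss.rates in the
join path), closing potential out-of-bounds accesses.
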
index 55116f3..a4a7854 100644 (file)
@@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
                        return 0;
 
                sband = dev->hw->wiphy->bands[status->band];
-               if (!sband || status->rate_idx > sband->n_bitrates)
+               if (!sband || status->rate_idx >= sband->n_bitrates)
                        return 0;
 
                rate = &sband->bitrates[status->rate_idx];
index b9f2a40..96018fd 100644 (file)
@@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
        struct ieee80211_hw *hw = dev->hw;
 
-       mt76_led_cleanup(dev);
+       if (IS_ENABLED(CONFIG_MT76_LEDS))
+               mt76_led_cleanup(dev);
        mt76_tx_status_check(dev, NULL, true);
        ieee80211_unregister_hw(hw);
 }
index 4937a08..fbeb9f7 100644 (file)
@@ -5074,18 +5074,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
 
 #ifdef CONFIG_PCI_ATS
 /*
- * Some devices have a broken ATS implementation causing IOMMU stalls.
- * Don't use ATS for those devices.
+ * Some devices require additional driver setup to enable ATS.  Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+ * chance to load and configure the device.
  */
-static void quirk_no_ats(struct pci_dev *pdev)
+static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
 {
-       pci_info(pdev, "disabling ATS (broken on this device)\n");
+       if (pdev->device == 0x7340 && pdev->revision != 0xc5)
+               return;
+
+       pci_info(pdev, "disabling ATS\n");
        pdev->ats_cap = 0;
 }
 
 /* AMD Stoney platform GPU */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
+/* AMD Iceland dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+/* AMD Navi14 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
 #endif /* CONFIG_PCI_ATS */
 
 /* Freescale PCIe doesn't support MSI in RC mode */
index 55083c6..95dca2c 100644 (file)
@@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev)
 
        if (ret < 0) {
                dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
-               goto ddr_perf_err;
+               goto cpuhp_state_err;
        }
 
        pmu->cpuhp_state = ret;
 
        /* Register the pmu instance for cpu hotplug */
-       cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+       ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+       if (ret) {
+               dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+               goto cpuhp_instance_err;
+       }
 
        /* Request irq */
        irq = of_irq_get(np, 0);
@@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
        return 0;
 
 ddr_perf_err:
-       if (pmu->cpuhp_state)
-               cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
-
+       cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+cpuhp_instance_err:
+       cpuhp_remove_multi_state(pmu->cpuhp_state);
+cpuhp_state_err:
        ida_simple_remove(&ddr_ida, pmu->id);
        dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
        return ret;
@@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
        struct ddr_pmu *pmu = platform_get_drvdata(pdev);
 
        cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+       cpuhp_remove_multi_state(pmu->cpuhp_state);
        irq_set_affinity_hint(pmu->irq, NULL);
 
        perf_pmu_unregister(&pmu->pmu);
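
The relabelled error path is the usual inverse-order unwind idiom: each
failure point jumps to a label that undoes exactly what had already
succeeded. Schematically, using the calls from this probe (arguments
elided):

	ret = cpuhp_setup_state_multi(...);
	if (ret < 0)
		goto cpuhp_state_err;
	ret = cpuhp_state_add_instance_nocalls(...);
	if (ret)
		goto cpuhp_instance_err;
	/* ... later failures jump to ddr_perf_err ... */
	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(...);  /* undo add_instance */
cpuhp_instance_err:
	cpuhp_remove_multi_state(...);             /* undo setup_state_multi */
cpuhp_state_err:
	ida_simple_remove(...);                    /* undo the earlier ida alloc */
	return ret;
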
index 96183e3..584de8f 100644 (file)
@@ -337,38 +337,44 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
        hisi_pmu->ops->stop_counters(hisi_pmu);
 }
 
+
 /*
- * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu
- * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID
- * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID
- * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID
- * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
+ * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be
+ * determined from the MPIDR_EL1, but the encoding varies by CPU:
+ *
+ * - For MT variants of TSV110:
+ *   SCCL is Aff2[7:3], CCL is Aff2[2:0]
+ *
+ * - For other MT parts:
+ *   SCCL is Aff3[7:0], CCL is Aff2[7:0]
+ *
+ * - For non-MT parts:
+ *   SCCL is Aff2[7:0], CCL is Aff1[7:0]
  */
-static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
+static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
 {
        u64 mpidr = read_cpuid_mpidr();
-
-       if (mpidr & MPIDR_MT_BITMASK) {
-               if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
-                       int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-
-                       if (sccl_id)
-                               *sccl_id = aff2 >> 3;
-                       if (ccl_id)
-                               *ccl_id = aff2 & 0x7;
-               } else {
-                       if (sccl_id)
-                               *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
-                       if (ccl_id)
-                               *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-               }
+       int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+       int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+       int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+       bool mt = mpidr & MPIDR_MT_BITMASK;
+       int sccl, ccl;
+
+       if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
+               sccl = aff2 >> 3;
+               ccl = aff2 & 0x7;
+       } else if (mt) {
+               sccl = aff3;
+               ccl = aff2;
        } else {
-               if (sccl_id)
-                       *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-               if (ccl_id)
-                       *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+               sccl = aff2;
+               ccl = aff1;
        }
+
+       if (scclp)
+               *scclp = sccl;
+       if (cclp)
+               *cclp = ccl;
 }
 
 /*
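
A worked example of the decoding above, with an illustrative MPIDR value:

	/* MT TSV110 part with Aff2 == 0x2b (0b101011) */
	int aff2 = 0x2b;
	int sccl = aff2 >> 3;	/* 0b101 == 5 */
	int ccl  = aff2 & 0x7;	/* 0b011 == 3 */

On a non-MT part, the same 0x2b in Aff2 would instead be the SCCL outright.
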
index 2bbd8ee..46600d9 100644 (file)
@@ -1535,15 +1535,8 @@ int pinctrl_init_done(struct device *dev)
        return ret;
 }
 
-#ifdef CONFIG_PM
-
-/**
- * pinctrl_pm_select_state() - select pinctrl state for PM
- * @dev: device to select default state for
- * @state: state to set
- */
-static int pinctrl_pm_select_state(struct device *dev,
-                                  struct pinctrl_state *state)
+static int pinctrl_select_bound_state(struct device *dev,
+                                     struct pinctrl_state *state)
 {
        struct dev_pin_info *pins = dev->pins;
        int ret;
@@ -1558,15 +1551,27 @@ static int pinctrl_pm_select_state(struct device *dev,
 }
 
 /**
- * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * pinctrl_select_default_state() - select default pinctrl state
  * @dev: device to select default state for
  */
-int pinctrl_pm_select_default_state(struct device *dev)
+int pinctrl_select_default_state(struct device *dev)
 {
        if (!dev->pins)
                return 0;
 
-       return pinctrl_pm_select_state(dev, dev->pins->default_state);
+       return pinctrl_select_bound_state(dev, dev->pins->default_state);
+}
+EXPORT_SYMBOL_GPL(pinctrl_select_default_state);
+
+#ifdef CONFIG_PM
+
+/**
+ * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * @dev: device to select default state for
+ */
+int pinctrl_pm_select_default_state(struct device *dev)
+{
+       return pinctrl_select_default_state(dev);
 }
 EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state);
 
@@ -1579,7 +1584,7 @@ int pinctrl_pm_select_sleep_state(struct device *dev)
        if (!dev->pins)
                return 0;
 
-       return pinctrl_pm_select_state(dev, dev->pins->sleep_state);
+       return pinctrl_select_bound_state(dev, dev->pins->sleep_state);
 }
 EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state);
 
@@ -1592,7 +1597,7 @@ int pinctrl_pm_select_idle_state(struct device *dev)
        if (!dev->pins)
                return 0;
 
-       return pinctrl_pm_select_state(dev, dev->pins->idle_state);
+       return pinctrl_select_bound_state(dev, dev->pins->idle_state);
 }
 EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state);
 #endif
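
A hedged sketch of using the newly exported helper outside the PM wrappers,
e.g. to reapply pin state after a controller reset. The driver is
hypothetical; the declaration is assumed to live in
<linux/pinctrl/consumer.h> alongside the existing pinctrl_pm_* helpers:

    #include <linux/pinctrl/consumer.h>
    #include <linux/platform_device.h>

    static int demo_reset(struct platform_device *pdev)
    {
            int ret;

            /* ... reset the controller, losing its pin configuration ... */

            /* Reapply the bound default state; no-op if dev->pins is NULL. */
            ret = pinctrl_select_default_state(&pdev->dev);
            if (ret)
                    dev_err(&pdev->dev, "pinctrl default state: %d\n", ret);

            return ret;
    }
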
index 44d7f50..d936e7a 100644 (file)
@@ -49,6 +49,7 @@
                .padown_offset = SPT_PAD_OWN,           \
                .padcfglock_offset = SPT_PADCFGLOCK,    \
                .hostown_offset = SPT_HOSTSW_OWN,       \
+               .is_offset = SPT_GPI_IS,                \
                .ie_offset = SPT_GPI_IE,                \
                .pin_base = (s),                        \
                .npins = ((e) - (s) + 1),               \
index 706207d..77be37a 100644 (file)
@@ -504,6 +504,20 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
        item = pdata->items;
 
        for (i = 0; i < pdata->counter; i++, item++) {
+               if (item->capability) {
+                       /*
+                        * Read group capability register to get actual number
+                        * of interrupt capable components and set group mask
+                        * accordingly.
+                        */
+                       ret = regmap_read(priv->regmap, item->capability,
+                                         &regval);
+                       if (ret)
+                               goto out;
+
+                       item->mask = GENMASK((regval & item->mask) - 1, 0);
+               }
+
                /* Clear group presence event. */
                ret = regmap_write(priv->regmap, item->reg +
                                   MLXREG_HOTPLUG_EVENT_OFF, 0);
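
The GENMASK() line above deserves a worked example: if the capability
register reports 3 interrupt-capable components (after masking with the
static item->mask, assumed here to cover the count field), the expression
reduces to GENMASK(2, 0) == 0x7, i.e. one mask bit per component. A
standalone illustration with a simplified 32-bit GENMASK:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace rendition of the kernel's GENMASK(h, l) for h < 32. */
    #define GENMASK(h, l) ((~0U >> (31 - (h))) & (~0U << (l)))

    int main(void)
    {
            uint32_t regval = 3;    /* capability: 3 components present */
            uint32_t mask = GENMASK((regval & 0x7) - 1, 0);

            printf("mask = 0x%x\n", mask);  /* prints: mask = 0x7 */
            return 0;
    }
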
index 27d5b40..587403c 100644 (file)
@@ -997,7 +997,6 @@ config INTEL_SCU_IPC
 config INTEL_SCU_IPC_UTIL
        tristate "Intel SCU IPC utility driver"
        depends on INTEL_SCU_IPC
-       default y
        ---help---
          The IPC Util driver provides an interface with the SCU enabling
          low level access for debug work and updating the firmware. Say
@@ -1299,9 +1298,9 @@ config INTEL_ATOMISP2_PM
        depends on PCI && IOSF_MBI && PM
        help
          Power-management driver for Intel's Image Signal Processor found on
-         Bay and Cherry Trail devices. This dummy driver's sole purpose is to
-         turn the ISP off (put it in D3) to save power and to allow entering
-         of S0ix modes.
+         Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
+         is to turn the ISP off (put it in D3) to save power and to allow
+         entering of S0ix modes.
 
          To compile this driver as a module, choose M here: the module
          will be called intel_atomisp2_pm.
@@ -1337,6 +1336,17 @@ config PCENGINES_APU2
          To compile this driver as a module, choose M here: the module
          will be called pcengines-apuv2.
 
+config INTEL_UNCORE_FREQ_CONTROL
+       tristate "Intel Uncore frequency control driver"
+       depends on X86_64
+       help
+         This driver allows control of uncore frequency limits on
+         supported server platforms.
+         Uncore frequency controls RING/LLC (last-level cache) clocks.
+
+         To compile this driver as a module, choose M here: the module
+         will be called intel-uncore-frequency.
+
 source "drivers/platform/x86/intel_speed_select_if/Kconfig"
 
 config SYSTEM76_ACPI
index 42d85a0..3747b1f 100644 (file)
@@ -105,3 +105,4 @@ obj-$(CONFIG_INTEL_ATOMISP2_PM)     += intel_atomisp2_pm.o
 obj-$(CONFIG_PCENGINES_APU2)   += pcengines-apuv2.o
 obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
 obj-$(CONFIG_SYSTEM76_ACPI)    += system76_acpi.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL)        += intel-uncore-frequency.o
index b361c73..6f12747 100644 (file)
@@ -471,6 +471,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
        { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
        { KE_IGNORE, 0x6E, },  /* Low Battery notification */
+       { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
        { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
        { KE_KEY, 0x7c, { KEY_MICMUTE } },
        { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
index 982f0cc..43bb15e 100644 (file)
@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
 #define NOTIFY_KBD_BRTDWN              0xc5
 #define NOTIFY_KBD_BRTTOGGLE           0xc7
 #define NOTIFY_KBD_FBM                 0x99
+#define NOTIFY_KBD_TTP                 0xae
 
 #define ASUS_WMI_FNLOCK_BIOS_DISABLED  BIT(0)
 
@@ -81,6 +82,10 @@ MODULE_LICENSE("GPL");
 #define ASUS_FAN_BOOST_MODE_SILENT_MASK                0x02
 #define ASUS_FAN_BOOST_MODES_MASK              0x03
 
+#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT   0
+#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1
+#define ASUS_THROTTLE_THERMAL_POLICY_SILENT    2
+
 #define USB_INTEL_XUSB2PR              0xD0
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI  0x9c31
 
@@ -198,6 +203,9 @@ struct asus_wmi {
        u8 fan_boost_mode_mask;
        u8 fan_boost_mode;
 
+       bool throttle_thermal_policy_available;
+       u8 throttle_thermal_policy_mode;
+
        // The RSOC controls the maximum charging percentage.
        bool battery_rsoc_available;
 
@@ -1718,6 +1726,107 @@ static ssize_t fan_boost_mode_store(struct device *dev,
 // Fan boost mode: 0 - normal, 1 - overboost, 2 - silent
 static DEVICE_ATTR_RW(fan_boost_mode);
 
+/* Throttle thermal policy ****************************************************/
+
+static int throttle_thermal_policy_check_present(struct asus_wmi *asus)
+{
+       u32 result;
+       int err;
+
+       asus->throttle_thermal_policy_available = false;
+
+       err = asus_wmi_get_devstate(asus,
+                                   ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+                                   &result);
+       if (err) {
+               if (err == -ENODEV)
+                       return 0;
+               return err;
+       }
+
+       if (result & ASUS_WMI_DSTS_PRESENCE_BIT)
+               asus->throttle_thermal_policy_available = true;
+
+       return 0;
+}
+
+static int throttle_thermal_policy_write(struct asus_wmi *asus)
+{
+       int err;
+       u8 value;
+       u32 retval;
+
+       value = asus->throttle_thermal_policy_mode;
+
+       err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+                                   value, &retval);
+       if (err) {
+               pr_warn("Failed to set throttle thermal policy: %d\n", err);
+               return err;
+       }
+
+       if (retval != 1) {
+               pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n",
+                       retval);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
+{
+       if (!asus->throttle_thermal_policy_available)
+               return 0;
+
+       asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+       return throttle_thermal_policy_write(asus);
+}
+
+static int throttle_thermal_policy_switch_next(struct asus_wmi *asus)
+{
+       u8 new_mode = asus->throttle_thermal_policy_mode + 1;
+
+       if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+               new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+
+       asus->throttle_thermal_policy_mode = new_mode;
+       return throttle_thermal_policy_write(asus);
+}
+
+static ssize_t throttle_thermal_policy_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct asus_wmi *asus = dev_get_drvdata(dev);
+       u8 mode = asus->throttle_thermal_policy_mode;
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n", mode);
+}
+
+static ssize_t throttle_thermal_policy_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       int result;
+       u8 new_mode;
+       struct asus_wmi *asus = dev_get_drvdata(dev);
+
+       result = kstrtou8(buf, 10, &new_mode);
+       if (result < 0)
+               return result;
+
+       if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+               return -EINVAL;
+
+       asus->throttle_thermal_policy_mode = new_mode;
+       result = throttle_thermal_policy_write(asus);
+       if (result)
+               return result;
+
+       return count;
+}
+
+// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
+static DEVICE_ATTR_RW(throttle_thermal_policy);
+
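
A short userspace sketch for driving the new attribute. The sysfs path
assumes the asus-nb-wmi platform device name and may differ per platform:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path =
                    "/sys/devices/platform/asus-nb-wmi/throttle_thermal_policy";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* 0 - default, 1 - overboost, 2 - silent */
            if (write(fd, "2", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }
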
 /* Backlight ******************************************************************/
 
 static int read_backlight_power(struct asus_wmi *asus)
@@ -1999,6 +2108,11 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
                return;
        }
 
+       if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) {
+               throttle_thermal_policy_switch_next(asus);
+               return;
+       }
+
        if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle)
                return;
 
@@ -2149,6 +2263,7 @@ static struct attribute *platform_attributes[] = {
        &dev_attr_lid_resume.attr,
        &dev_attr_als_enable.attr,
        &dev_attr_fan_boost_mode.attr,
+       &dev_attr_throttle_thermal_policy.attr,
        NULL
 };
 
@@ -2172,6 +2287,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
                devid = ASUS_WMI_DEVID_ALS_ENABLE;
        else if (attr == &dev_attr_fan_boost_mode.attr)
                ok = asus->fan_boost_mode_available;
+       else if (attr == &dev_attr_throttle_thermal_policy.attr)
+               ok = asus->throttle_thermal_policy_available;
 
        if (devid != -1)
                ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -2431,6 +2548,12 @@ static int asus_wmi_add(struct platform_device *pdev)
        if (err)
                goto fail_fan_boost_mode;
 
+       err = throttle_thermal_policy_check_present(asus);
+       if (err)
+               goto fail_throttle_thermal_policy;
+
+       throttle_thermal_policy_set_default(asus);
+
        err = asus_wmi_sysfs_init(asus->platform_device);
        if (err)
                goto fail_sysfs;
@@ -2515,6 +2638,7 @@ fail_hwmon:
 fail_input:
        asus_wmi_sysfs_exit(asus->platform_device);
 fail_sysfs:
+fail_throttle_thermal_policy:
 fail_fan_boost_mode:
 fail_platform:
        kfree(asus);
diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c
new file mode 100644 (file)
index 0000000..2b1a073
--- /dev/null
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Uncore Frequency Setting
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Provides an interface to set MSR 0x620 at per-die granularity. On CPU
+ * online, one control CPU is identified per die to read/write the limit.
+ * If that control CPU goes offline, another CPU in the die takes over.
+ * When the last CPU in a die goes offline, the sysfs object for that die
+ * is removed. The majority of the code deals with creating the sysfs
+ * objects and their read/write attributes.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define MSR_UNCORE_RATIO_LIMIT                 0x620
+#define UNCORE_FREQ_KHZ_MULTIPLIER             100000
+
+/**
+ * struct uncore_data -        Encapsulate all uncore data
+ * @kobj:              Sysfs kobject for this die's attribute directory
+ * @stored_uncore_data:        Last user-changed MSR 0x620 value, which will be
+ *                     restored on system resume.
+ * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init
+ * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init
+ * @control_cpu:       Designated CPU for a die to read/write
+ * @valid:             Marks the data valid/invalid
+ *
+ * This structure is used to encapsulate all data related to uncore sysfs
+ * settings for a die/package.
+ */
+struct uncore_data {
+       struct kobject kobj;
+       u64 stored_uncore_data;
+       u32 initial_min_freq_khz;
+       u32 initial_max_freq_khz;
+       int control_cpu;
+       bool valid;
+};
+
+#define to_uncore_data(a) container_of(a, struct uncore_data, kobj)
+
+/* Max instances for uncore data, one for each die */
+static int uncore_max_entries __read_mostly;
+/* Storage for uncore data for all instances */
+static struct uncore_data *uncore_instances;
+/* Root of all the uncore sysfs kobjects */
+static struct kobject uncore_root_kobj;
+/* Stores the CPU mask of the target CPUs to use during uncore read/write */
+static cpumask_t uncore_cpu_mask;
+/* CPU online callback register instance */
+static enum cpuhp_state uncore_hp_state __read_mostly;
+/* Mutex to control all mutual exclusions */
+static DEFINE_MUTEX(uncore_lock);
+
+struct uncore_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct kobject *kobj,
+                       struct attribute *attr, char *buf);
+       ssize_t (*store)(struct kobject *kobj,
+                        struct attribute *attr, const char *c, ssize_t count);
+};
+
+#define define_one_uncore_ro(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_uncore_rw(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define show_uncore_data(member_name)                                  \
+       static ssize_t show_##member_name(struct kobject *kobj,         \
+                                         struct attribute *attr,       \
+                                         char *buf)                    \
+       {                                                               \
+               struct uncore_data *data = to_uncore_data(kobj);        \
+               return scnprintf(buf, PAGE_SIZE, "%u\n",                \
+                                data->member_name);                    \
+       }                                                               \
+       define_one_uncore_ro(member_name)
+
+show_uncore_data(initial_min_freq_khz);
+show_uncore_data(initial_max_freq_khz);
+
+/* Common function to read MSR 0x620 and read min/max */
+static int uncore_read_ratio(struct uncore_data *data, unsigned int *min,
+                            unsigned int *max)
+{
+       u64 cap;
+       int ret;
+
+       ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+       if (ret)
+               return ret;
+
+       *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+       *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+       return 0;
+}
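
Per the decode above, MSR 0x620 packs the maximum ratio in bits [6:0] and
the minimum ratio in bits [14:8], each in units of 100 MHz. A standalone
decode of a made-up sample value:

    #include <stdint.h>
    #include <stdio.h>

    #define UNCORE_FREQ_KHZ_MULTIPLIER 100000

    int main(void)
    {
            uint64_t cap = 0x0c18; /* sample: min ratio 0x0c, max ratio 0x18 */
            unsigned int max = (cap & 0x7f) * UNCORE_FREQ_KHZ_MULTIPLIER;
            unsigned int min = ((cap >> 8) & 0x7f) * UNCORE_FREQ_KHZ_MULTIPLIER;

            /* prints: min=1200000 kHz max=2400000 kHz */
            printf("min=%u kHz max=%u kHz\n", min, max);
            return 0;
    }
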
+
+/* Common function to set min/max ratios to be used by sysfs callbacks */
+static int uncore_write_ratio(struct uncore_data *data, unsigned int input,
+                             int set_max)
+{
+       int ret;
+       u64 cap;
+
+       mutex_lock(&uncore_lock);
+
+       input /= UNCORE_FREQ_KHZ_MULTIPLIER;
+       if (!input || input > 0x7F) {
+               ret = -EINVAL;
+               goto finish_write;
+       }
+
+       ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+       if (ret)
+               goto finish_write;
+
+       if (set_max) {
+               cap &= ~0x7F;
+               cap |= input;
+       } else  {
+               cap &= ~GENMASK(14, 8);
+               cap |= (input << 8);
+       }
+
+       ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
+       if (ret)
+               goto finish_write;
+
+       data->stored_uncore_data = cap;
+
+finish_write:
+       mutex_unlock(&uncore_lock);
+
+       return ret;
+}
+
+static ssize_t store_min_max_freq_khz(struct kobject *kobj,
+                                     struct attribute *attr,
+                                     const char *buf, ssize_t count,
+                                     int min_max)
+{
+       struct uncore_data *data = to_uncore_data(kobj);
+       unsigned int input;
+
+       if (kstrtouint(buf, 10, &input))
+               return -EINVAL;
+
+       uncore_write_ratio(data, input, min_max);
+
+       return count;
+}
+
+static ssize_t show_min_max_freq_khz(struct kobject *kobj,
+                                    struct attribute *attr,
+                                    char *buf, int min_max)
+{
+       struct uncore_data *data = to_uncore_data(kobj);
+       unsigned int min, max;
+       int ret;
+
+       mutex_lock(&uncore_lock);
+       ret = uncore_read_ratio(data, &min, &max);
+       mutex_unlock(&uncore_lock);
+       if (ret)
+               return ret;
+
+       if (min_max)
+               return sprintf(buf, "%u\n", max);
+
+       return sprintf(buf, "%u\n", min);
+}
+
+#define store_uncore_min_max(name, min_max)                            \
+       static ssize_t store_##name(struct kobject *kobj,               \
+                                   struct attribute *attr,             \
+                                   const char *buf, ssize_t count)     \
+       {                                                               \
+                                                                       \
+               return store_min_max_freq_khz(kobj, attr, buf, count,   \
+                                             min_max);                 \
+       }
+
+#define show_uncore_min_max(name, min_max)                             \
+       static ssize_t show_##name(struct kobject *kobj,                \
+                                  struct attribute *attr, char *buf)   \
+       {                                                               \
+                                                                       \
+               return show_min_max_freq_khz(kobj, attr, buf, min_max); \
+       }
+
+store_uncore_min_max(min_freq_khz, 0);
+store_uncore_min_max(max_freq_khz, 1);
+
+show_uncore_min_max(min_freq_khz, 0);
+show_uncore_min_max(max_freq_khz, 1);
+
+define_one_uncore_rw(min_freq_khz);
+define_one_uncore_rw(max_freq_khz);
+
+static struct attribute *uncore_attrs[] = {
+       &initial_min_freq_khz.attr,
+       &initial_max_freq_khz.attr,
+       &max_freq_khz.attr,
+       &min_freq_khz.attr,
+       NULL
+};
+
+static struct kobj_type uncore_ktype = {
+       .sysfs_ops = &kobj_sysfs_ops,
+       .default_attrs = uncore_attrs,
+};
+
+static struct kobj_type uncore_root_ktype = {
+       .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* Caller provides protection */
+static struct uncore_data *uncore_get_instance(unsigned int cpu)
+{
+       int id = topology_logical_die_id(cpu);
+
+       if (id >= 0 && id < uncore_max_entries)
+               return &uncore_instances[id];
+
+       return NULL;
+}
+
+static void uncore_add_die_entry(int cpu)
+{
+       struct uncore_data *data;
+
+       mutex_lock(&uncore_lock);
+       data = uncore_get_instance(cpu);
+       if (!data) {
+               mutex_unlock(&uncore_lock);
+               return;
+       }
+
+       if (data->valid) {
+               /* control cpu changed */
+               data->control_cpu = cpu;
+       } else {
+               char str[64];
+               int ret;
+
+               memset(data, 0, sizeof(*data));
+               sprintf(str, "package_%02d_die_%02d",
+                       topology_physical_package_id(cpu),
+                       topology_die_id(cpu));
+
+               uncore_read_ratio(data, &data->initial_min_freq_khz,
+                                 &data->initial_max_freq_khz);
+
+               ret = kobject_init_and_add(&data->kobj, &uncore_ktype,
+                                          &uncore_root_kobj, str);
+               if (!ret) {
+                       data->control_cpu = cpu;
+                       data->valid = true;
+               }
+       }
+       mutex_unlock(&uncore_lock);
+}
+
+/* Last CPU in this die is offline, so remove sysfs entries */
+static void uncore_remove_die_entry(int cpu)
+{
+       struct uncore_data *data;
+
+       mutex_lock(&uncore_lock);
+       data = uncore_get_instance(cpu);
+       if (data) {
+               kobject_put(&data->kobj);
+               data->control_cpu = -1;
+               data->valid = false;
+       }
+       mutex_unlock(&uncore_lock);
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+       int target;
+
+       /* Check if there is an online cpu in the die for uncore MSR access */
+       target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
+       if (target < nr_cpu_ids)
+               return 0;
+
+       /* Use this CPU on this die as a control CPU */
+       cpumask_set_cpu(cpu, &uncore_cpu_mask);
+       uncore_add_die_entry(cpu);
+
+       return 0;
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+       int target;
+
+       /* Check if the offlined cpu was the one used for uncore MSRs */
+       if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+               return 0;
+
+       /* Find a new cpu to set uncore MSR */
+       target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+
+       if (target < nr_cpu_ids) {
+               cpumask_set_cpu(target, &uncore_cpu_mask);
+               uncore_add_die_entry(target);
+       } else {
+               uncore_remove_die_entry(cpu);
+       }
+
+       return 0;
+}
+
+static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
+                           void *_unused)
+{
+       int cpu;
+
+       switch (mode) {
+       case PM_POST_HIBERNATION:
+       case PM_POST_RESTORE:
+       case PM_POST_SUSPEND:
+               for_each_cpu(cpu, &uncore_cpu_mask) {
+                       struct uncore_data *data;
+                       int ret;
+
+                       data = uncore_get_instance(cpu);
+                       if (!data || !data->valid || !data->stored_uncore_data)
+                               continue;
+
+                       ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT,
+                                           data->stored_uncore_data);
+                       if (ret)
+                               return ret;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static struct notifier_block uncore_pm_nb = {
+       .notifier_call = uncore_pm_notify,
+};
+
+#define ICPU(model)     { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
+       ICPU(INTEL_FAM6_BROADWELL_G),
+       ICPU(INTEL_FAM6_BROADWELL_X),
+       ICPU(INTEL_FAM6_BROADWELL_D),
+       ICPU(INTEL_FAM6_SKYLAKE_X),
+       ICPU(INTEL_FAM6_ICELAKE_X),
+       ICPU(INTEL_FAM6_ICELAKE_D),
+       {}
+};
+
+static int __init intel_uncore_init(void)
+{
+       const struct x86_cpu_id *id;
+       int ret;
+
+       id = x86_match_cpu(intel_uncore_cpu_ids);
+       if (!id)
+               return -ENODEV;
+
+       uncore_max_entries = topology_max_packages() *
+                                       topology_max_die_per_package();
+       uncore_instances = kcalloc(uncore_max_entries,
+                                  sizeof(*uncore_instances), GFP_KERNEL);
+       if (!uncore_instances)
+               return -ENOMEM;
+
+       ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype,
+                                  &cpu_subsys.dev_root->kobj,
+                                  "intel_uncore_frequency");
+       if (ret)
+               goto err_free;
+
+       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                               "platform/x86/uncore-freq:online",
+                               uncore_event_cpu_online,
+                               uncore_event_cpu_offline);
+       if (ret < 0)
+               goto err_rem_kobj;
+
+       uncore_hp_state = ret;
+
+       ret = register_pm_notifier(&uncore_pm_nb);
+       if (ret)
+               goto err_rem_state;
+
+       return 0;
+
+err_rem_state:
+       cpuhp_remove_state(uncore_hp_state);
+err_rem_kobj:
+       kobject_put(&uncore_root_kobj);
+err_free:
+       kfree(uncore_instances);
+
+       return ret;
+}
+module_init(intel_uncore_init)
+
+static void __exit intel_uncore_exit(void)
+{
+       int i;
+
+       unregister_pm_notifier(&uncore_pm_nb);
+       cpuhp_remove_state(uncore_hp_state);
+       for (i = 0; i < uncore_max_entries; ++i) {
+               if (uncore_instances[i].valid)
+                       kobject_put(&uncore_instances[i].kobj);
+       }
+       kobject_put(&uncore_root_kobj);
+       kfree(uncore_instances);
+}
+module_exit(intel_uncore_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
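
Putting the driver together: per the kobject names above, it creates
/sys/devices/system/cpu/intel_uncore_frequency/package_XX_die_YY/ with
min_freq_khz, max_freq_khz and the two initial_* attributes. A hedged
userspace sketch for capping the uncore ceiling on package 0, die 0:

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/devices/system/cpu/intel_uncore_frequency"
                               "/package_00_die_00/max_freq_khz";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* 2000000 kHz == ratio 20; writes of 0 or > 0x7f are rejected. */
            fprintf(f, "%u\n", 2000000u);
            fclose(f);
            return 0;
    }
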
index b0f421f..805fc0d 100644 (file)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry
- * Trail devices. The sole purpose of this driver is to allow the ISP to
- * be put in D3.
+ * Dummy driver for Intel's Image Signal Processor found on Bay Trail
+ * and Cherry Trail devices. The sole purpose of this driver is to allow
+ * the ISP to be put in D3.
  *
  * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
  *
@@ -36,8 +36,7 @@
 static int isp_set_power(struct pci_dev *dev, bool enable)
 {
        unsigned long timeout;
-       u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON :
-               ISPSSPM0_IUNIT_POWER_OFF;
+       u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF;
 
        /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
        iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
@@ -45,29 +44,25 @@ static int isp_set_power(struct pci_dev *dev, bool enable)
 
        /*
         * There should be no IUNIT access while power-down is
-        * in progress HW sighting: 4567865
+        * in progress. HW sighting: 4567865.
         * Wait up to 50 ms for the IUNIT to shut down.
         * And we do the same for power on.
         */
        timeout = jiffies + msecs_to_jiffies(50);
-       while (1) {
+       do {
                u32 tmp;
 
                /* Wait until ISPSSPM0 bit[25:24] shows the right value */
                iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
                tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
                if (tmp == val)
-                       break;
+                       return 0;
 
-               if (time_after(jiffies, timeout)) {
-                       dev_err(&dev->dev, "IUNIT power-%s timeout.\n",
-                               enable ? "on" : "off");
-                       return -EBUSY;
-               }
                usleep_range(1000, 2000);
-       }
+       } while (time_before(jiffies, timeout));
 
-       return 0;
+       dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
+       return -EBUSY;
 }
 
 static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
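
The rewritten loop above is the usual jiffies-based poll-with-timeout idiom:
compute the deadline once, re-check the status every pass, and only fail
once the deadline has passed. A generic kernel-style sketch of the same
shape (check_ready() is a hypothetical callback); the final re-check after
the loop guards against sleeping past the deadline:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    static int poll_until_ready(bool (*check_ready)(void *), void *ctx,
                                unsigned int timeout_ms)
    {
            unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

            do {
                    if (check_ready(ctx))
                            return 0;
                    usleep_range(1000, 2000);
            } while (time_before(jiffies, timeout));

            /* One last look in case we slept past the deadline. */
            return check_ready(ctx) ? 0 : -EBUSY;
    }
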
index 292bace..6f43683 100644 (file)
@@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)
 
        input_set_capability(input, EV_KEY, KEY_POWER);
 
-       ddata = (struct mid_pb_ddata *)id->driver_data;
+       ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
+                            sizeof(*ddata), GFP_KERNEL);
        if (!ddata)
-               return -ENODATA;
+               return -ENOMEM;
 
        ddata->dev = &pdev->dev;
        ddata->irq = irq;
index 571b475..144faa8 100644 (file)
@@ -49,7 +49,7 @@ static const struct pmc_bit_map spt_pll_map[] = {
        {"GEN2 USB2PCIE2 PLL",          SPT_PMC_BIT_MPHY_CMN_LANE1},
        {"DMIPCIE3 PLL",                SPT_PMC_BIT_MPHY_CMN_LANE2},
        {"SATA PLL",                    SPT_PMC_BIT_MPHY_CMN_LANE3},
-       {},
+       {}
 };
 
 static const struct pmc_bit_map spt_mphy_map[] = {
@@ -69,7 +69,7 @@ static const struct pmc_bit_map spt_mphy_map[] = {
        {"MPHY CORE LANE 13",          SPT_PMC_BIT_MPHY_LANE13},
        {"MPHY CORE LANE 14",          SPT_PMC_BIT_MPHY_LANE14},
        {"MPHY CORE LANE 15",          SPT_PMC_BIT_MPHY_LANE15},
-       {},
+       {}
 };
 
 static const struct pmc_bit_map spt_pfear_map[] = {
@@ -113,7 +113,12 @@ static const struct pmc_bit_map spt_pfear_map[] = {
        {"CSME_SMS1",                   SPT_PMC_BIT_CSME_SMS1},
        {"CSME_RTC",                    SPT_PMC_BIT_CSME_RTC},
        {"CSME_PSF",                    SPT_PMC_BIT_CSME_PSF},
-       {},
+       {}
+};
+
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
+       spt_pfear_map,
+       NULL
 };
 
 static const struct pmc_bit_map spt_ltr_show_map[] = {
@@ -142,7 +147,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = {
 };
 
 static const struct pmc_reg_map spt_reg_map = {
-       .pfear_sts = spt_pfear_map,
+       .pfear_sts = ext_spt_pfear_map,
        .mphy_sts = spt_mphy_map,
        .pll_sts = spt_pll_map,
        .ltr_show_sts = spt_ltr_show_map,
@@ -186,7 +191,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
        {"SDX",                 BIT(4)},
        {"SPE",                 BIT(5)},
        {"Fuse",                BIT(6)},
-       /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+       /*
+        * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+        * Tiger Lake and Elkhart Lake.
+        */
        {"SBR8",                BIT(7)},
 
        {"CSME_FSC",            BIT(0)},
@@ -230,11 +238,22 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
        {"HDA_PGD4",            BIT(2)},
        {"HDA_PGD5",            BIT(3)},
        {"HDA_PGD6",            BIT(4)},
-       /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+       /*
+        * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+        * Tiger Lake and Elkhart Lake.
+        */
        {"PSF6",                BIT(5)},
        {"PSF7",                BIT(6)},
        {"PSF8",                BIT(7)},
+       {}
+};
+
+static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+       cnp_pfear_map,
+       NULL
+};
 
+static const struct pmc_bit_map icl_pfear_map[] = {
        /* Ice Lake generation onwards only */
        {"RES_65",              BIT(0)},
        {"RES_66",              BIT(1)},
@@ -247,6 +266,30 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
        {}
 };
 
+static const struct pmc_bit_map *ext_icl_pfear_map[] = {
+       cnp_pfear_map,
+       icl_pfear_map,
+       NULL
+};
+
+static const struct pmc_bit_map tgl_pfear_map[] = {
+       /* Tiger Lake and Elkhart Lake generation onwards only */
+       {"PSF9",                BIT(0)},
+       {"RES_66",              BIT(1)},
+       {"RES_67",              BIT(2)},
+       {"RES_68",              BIT(3)},
+       {"RES_69",              BIT(4)},
+       {"RES_70",              BIT(5)},
+       {"TBTLSX",              BIT(6)},
+       {}
+};
+
+static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+       cnp_pfear_map,
+       tgl_pfear_map,
+       NULL
+};
+
 static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
        {"AUDIO_D3",            BIT(0)},
        {"OTG_D3",              BIT(1)},
@@ -300,7 +343,7 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
        cnp_slps0_dbg0_map,
        cnp_slps0_dbg1_map,
        cnp_slps0_dbg2_map,
-       NULL,
+       NULL
 };
 
 static const struct pmc_bit_map cnp_ltr_show_map[] = {
@@ -334,7 +377,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
 };
 
 static const struct pmc_reg_map cnp_reg_map = {
-       .pfear_sts = cnp_pfear_map,
+       .pfear_sts = ext_cnp_pfear_map,
        .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
        .slps0_dbg_maps = cnp_slps0_dbg_maps,
        .ltr_show_sts = cnp_ltr_show_map,
@@ -350,7 +393,7 @@ static const struct pmc_reg_map cnp_reg_map = {
 };
 
 static const struct pmc_reg_map icl_reg_map = {
-       .pfear_sts = cnp_pfear_map,
+       .pfear_sts = ext_icl_pfear_map,
        .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
        .slps0_dbg_maps = cnp_slps0_dbg_maps,
        .ltr_show_sts = cnp_ltr_show_map,
@@ -365,18 +408,29 @@ static const struct pmc_reg_map icl_reg_map = {
        .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
 };
 
-static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
-{
-       return readb(pmcdev->regbase + offset);
-}
+static const struct pmc_reg_map tgl_reg_map = {
+       .pfear_sts = ext_tgl_pfear_map,
+       .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+       .slps0_dbg_maps = cnp_slps0_dbg_maps,
+       .ltr_show_sts = cnp_ltr_show_map,
+       .msr_sts = msr_map,
+       .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+       .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+       .regmap_length = CNP_PMC_MMIO_REG_LEN,
+       .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+       .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+       .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+       .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+       .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+};
 
 static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
 {
        return readl(pmcdev->regbase + reg_offset);
 }
 
-static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
-                                                       reg_offset, u32 val)
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+                                     u32 val)
 {
        writel(val, pmcdev->regbase + reg_offset);
 }
@@ -412,20 +466,25 @@ static int pmc_core_check_read_lock_bit(void)
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 static bool slps0_dbg_latch;
 
-static void pmc_core_display_map(struct seq_file *s, int index,
-                                u8 pf_reg, const struct pmc_bit_map *pf_map)
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+       return readb(pmcdev->regbase + offset);
+}
+
+static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
+                                u8 pf_reg, const struct pmc_bit_map **pf_map)
 {
        seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
-                  index, pf_map[index].name,
-                  pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+                  ip, pf_map[idx][index].name,
+                  pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
 }
 
 static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
 {
        struct pmc_dev *pmcdev = s->private;
-       const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+       const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
        u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
-       int index, iter;
+       int index, iter, idx, ip = 0;
 
        iter = pmcdev->map->ppfear0_offset;
 
@@ -433,9 +492,12 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
             index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
                pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
 
-       for (index = 0; map[index].name &&
-            index < pmcdev->map->ppfear_buckets * 8; index++)
-               pmc_core_display_map(s, index, pf_regs[index / 8], map);
+       for (idx = 0; maps[idx]; idx++) {
+               for (index = 0; maps[idx][index].name &&
+                    index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
+                       pmc_core_display_map(s, index, idx, ip,
+                                            pf_regs[index / 8], maps);
+       }
 
        return 0;
 }
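
The pfear tables are now a NULL-terminated array of bit-map arrays, so a
platform composes its map from the common Cannon Lake base plus a
generation-specific tail (see ext_icl_pfear_map above). A standalone sketch
of the same two-level iteration, with invented names:

    #include <stdio.h>

    struct bit_map {
            const char *name;       /* NULL name terminates one array */
    };

    static const struct bit_map base[] = { {"SBR0"}, {"SBR1"}, {NULL} };
    static const struct bit_map tail[] = { {"PSF9"}, {NULL} };

    /* NULL pointer terminates the array of arrays. */
    static const struct bit_map *ext_map[] = { base, tail, NULL };

    int main(void)
    {
            int ip = 0;

            for (int idx = 0; ext_map[idx]; idx++)
                    for (int i = 0; ext_map[idx][i].name; i++, ip++)
                            printf("IP %d: %s\n", ip, ext_map[idx][i].name);
            return 0;
    }
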
@@ -561,21 +623,22 @@ out_unlock:
 }
 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
 
-static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
-*userbuf, size_t count, loff_t *ppos)
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+                                        const char __user *userbuf,
+                                        size_t count, loff_t *ppos)
 {
        struct pmc_dev *pmcdev = &pmc;
        const struct pmc_reg_map *map = pmcdev->map;
        u32 val, buf_size, fd;
-       int err = 0;
+       int err;
 
        buf_size = count < 64 ? count : 64;
-       mutex_lock(&pmcdev->lock);
 
-       if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
-               err = -EFAULT;
-               goto out_unlock;
-       }
+       err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
+       if (err)
+               return err;
+
+       mutex_lock(&pmcdev->lock);
 
        if (val > map->ltr_ignore_max) {
                err = -EINVAL;
@@ -767,8 +830,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
        debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
                            &pmc_core_dev_state);
 
-       debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
-                           &pmc_core_ppfear_fops);
+       if (pmcdev->map->pfear_sts)
+               debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
+                                   pmcdev, &pmc_core_ppfear_fops);
 
        debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
                            &pmc_core_ltr_ignore_ops);
@@ -816,19 +880,22 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
        INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
        INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
+       INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map),
+       INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map),
+       INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map),
        {}
 };
 
 MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
 
 static const struct pci_device_id pmc_pci_ids[] = {
-       { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0},
-       { 0, },
+       { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+       { }
 };
 
 /*
  * This quirk can be used on those platforms where
- * the platform BIOS enforces 24Mhx Crystal to shutdown
+ * the platform BIOS enforces 24Mhz crystal to shutdown
  * before PMC can assert SLP_S0#.
  */
 static int quirk_xtal_ignore(const struct dmi_system_id *id)
index 8203ae3..f1a0792 100644 (file)
@@ -186,6 +186,8 @@ enum ppfear_regs {
 #define ICL_NUM_IP_IGN_ALLOWED                 20
 #define ICL_PMC_LTR_WIGIG                      0x1BFC
 
+#define TGL_NUM_IP_IGN_ALLOWED                 22
+
 struct pmc_bit_map {
        const char *name;
        u32 bit_mask;
@@ -213,7 +215,7 @@ struct pmc_bit_map {
  * captures them to have a common implementation.
  */
 struct pmc_reg_map {
-       const struct pmc_bit_map *pfear_sts;
+       const struct pmc_bit_map **pfear_sts;
        const struct pmc_bit_map *mphy_sts;
        const struct pmc_bit_map *pll_sts;
        const struct pmc_bit_map **slps0_dbg_maps;
index 5c1da2b..2433bf7 100644 (file)
  */
 
 #include <linux/acpi.h>
-#include <linux/atomic.h>
-#include <linux/bitops.h>
 #include <linux/delay.h>
-#include <linux/device.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/notifier.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_qos.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/suspend.h>
 
 #include <asm/intel_pmc_ipc.h>
 
@@ -184,11 +174,6 @@ static inline void ipc_data_writel(u32 data, u32 offset)
        writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
 }
 
-static inline u8 __maybe_unused ipc_data_readb(u32 offset)
-{
-       return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
-}
-
 static inline u32 ipc_data_readl(u32 offset)
 {
        return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
@@ -210,35 +195,6 @@ static inline int is_gcr_valid(u32 offset)
        return 0;
 }
 
-/**
- * intel_pmc_gcr_read() - Read a 32-bit PMC GCR register
- * @offset:    offset of GCR register from GCR address base
- * @data:      data pointer for storing the register output
- *
- * Reads the 32-bit PMC GCR register at given offset.
- *
- * Return:     negative value on error or 0 on success.
- */
-int intel_pmc_gcr_read(u32 offset, u32 *data)
-{
-       int ret;
-
-       spin_lock(&ipcdev.gcr_lock);
-
-       ret = is_gcr_valid(offset);
-       if (ret < 0) {
-               spin_unlock(&ipcdev.gcr_lock);
-               return ret;
-       }
-
-       *data = readl(ipcdev.gcr_mem_base + offset);
-
-       spin_unlock(&ipcdev.gcr_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_read);
-
 /**
  * intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register
  * @offset:    offset of GCR register from GCR address base
@@ -268,36 +224,6 @@ int intel_pmc_gcr_read64(u32 offset, u64 *data)
 }
 EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64);
 
-/**
- * intel_pmc_gcr_write() - Write PMC GCR register
- * @offset:    offset of GCR register from GCR address base
- * @data:      register update value
- *
- * Writes the PMC GCR register of given offset with given
- * value.
- *
- * Return:     negative value on error or 0 on success.
- */
-int intel_pmc_gcr_write(u32 offset, u32 data)
-{
-       int ret;
-
-       spin_lock(&ipcdev.gcr_lock);
-
-       ret = is_gcr_valid(offset);
-       if (ret < 0) {
-               spin_unlock(&ipcdev.gcr_lock);
-               return ret;
-       }
-
-       writel(data, ipcdev.gcr_mem_base + offset);
-
-       spin_unlock(&ipcdev.gcr_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
-
 /**
  * intel_pmc_gcr_update() - Update PMC GCR register bits
  * @offset:    offset of GCR register from GCR address base
@@ -309,7 +235,7 @@ EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
  *
  * Return:     negative value on error or 0 on success.
  */
-int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
+static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
 {
        u32 new_val;
        int ret = 0;
@@ -339,7 +265,6 @@ gcr_ipc_unlock:
        spin_unlock(&ipcdev.gcr_lock);
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
 
 static int update_no_reboot_bit(void *priv, bool set)
 {
@@ -405,7 +330,7 @@ static int intel_pmc_ipc_check_status(void)
  *
  * Return:     an IPC error code or 0 on success.
  */
-int intel_pmc_ipc_simple_command(int cmd, int sub)
+static int intel_pmc_ipc_simple_command(int cmd, int sub)
 {
        int ret;
 
@@ -420,7 +345,6 @@ int intel_pmc_ipc_simple_command(int cmd, int sub)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
 
 /**
  * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
@@ -437,8 +361,8 @@ EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
  *
  * Return:     an IPC error code or 0 on success.
  */
-int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
-                         u32 outlen, u32 dptr, u32 sptr)
+static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+                                u32 outlen, u32 dptr, u32 sptr)
 {
        u32 wbuf[4] = { 0 };
        int ret;
@@ -470,7 +394,6 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
 
 /**
  * intel_pmc_ipc_command() -  IPC command with input/output data
@@ -579,6 +502,7 @@ static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
        }
        return (ssize_t)count;
 }
+static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store);
 
 static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
                                             struct device_attribute *attr,
@@ -588,8 +512,9 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
        int subcmd;
        int ret;
 
-       if (kstrtoul(buf, 0, &val))
-               return -EINVAL;
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
 
        if (val)
                subcmd = 1;
@@ -602,11 +527,7 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
        }
        return (ssize_t)count;
 }
-
-static DEVICE_ATTR(simplecmd, S_IWUSR,
-                  NULL, intel_pmc_ipc_simple_cmd_store);
-static DEVICE_ATTR(northpeak, S_IWUSR,
-                  NULL, intel_pmc_ipc_northpeak_store);
+static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store);
 
 static struct attribute *intel_ipc_attrs[] = {
        &dev_attr_northpeak.attr,
@@ -618,6 +539,11 @@ static const struct attribute_group intel_ipc_group = {
        .attrs = intel_ipc_attrs,
 };
 
+static const struct attribute_group *intel_ipc_groups[] = {
+       &intel_ipc_group,
+       NULL
+};
+
 static struct resource punit_res_array[] = {
        /* Punit BIOS */
        {
@@ -958,18 +884,10 @@ static int ipc_plat_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
-       ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to create sysfs group %d\n",
-                       ret);
-               goto err_sys;
-       }
-
        ipcdev.has_gcr_regs = true;
 
        return 0;
-err_sys:
-       devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
+
 err_irq:
        platform_device_unregister(ipcdev.tco_dev);
        platform_device_unregister(ipcdev.punit_dev);
@@ -980,7 +898,6 @@ err_irq:
 
 static int ipc_plat_remove(struct platform_device *pdev)
 {
-       sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
        devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
        platform_device_unregister(ipcdev.tco_dev);
        platform_device_unregister(ipcdev.punit_dev);
@@ -995,6 +912,7 @@ static struct platform_driver ipc_plat_driver = {
        .driver = {
                .name = "pmc-ipc-plat",
                .acpi_match_table = ACPI_PTR(ipc_acpi_ids),
+               .dev_groups = intel_ipc_groups,
        },
 };
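
With .dev_groups set, the driver core creates the attribute group when the
device is bound and removes it on unbind, so probe() needs neither a manual
sysfs_create_group() nor the err_sys unwind path that was deleted above. A
minimal sketch of the pattern (driver and attribute names hypothetical):

    #include <linux/device.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
    {
            return scnprintf(buf, PAGE_SIZE, "hello\n");
    }
    static DEVICE_ATTR_RO(demo);

    static struct attribute *demo_attrs[] = {
            &dev_attr_demo.attr,
            NULL
    };
    ATTRIBUTE_GROUPS(demo);

    static int demo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver demo_driver = {
            .probe = demo_probe,
            .driver = {
                    .name = "demo-dev-groups",
                    /* Driver core adds/removes the group; no manual calls. */
                    .dev_groups = demo_groups,
            },
    };
    module_platform_driver(demo_driver);

    MODULE_LICENSE("GPL");
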
 
index cdab916..3d7da52 100644 (file)
 #include <asm/intel_scu_ipc.h>
 
 /* IPC defines the following message types */
-#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
-#define IPCMSG_BATTERY        0xEF /* Coulomb Counter Accumulator */
-#define IPCMSG_FW_UPDATE      0xFE /* Firmware update */
-#define IPCMSG_PCNTRL         0xFF /* Power controller unit read/write */
-#define IPCMSG_FW_REVISION    0xF4 /* Get firmware revision */
+#define IPCMSG_PCNTRL         0xff /* Power controller unit read/write */
 
 /* Command id associated with message IPCMSG_PCNTRL */
 #define IPC_CMD_PCNTRL_W      0 /* Register write */
 #define IPC_RWBUF_SIZE    20           /* IPC Read buffer Size */
 #define IPC_IOC                  0x100         /* IPC command register IOC bit */
 
-#define PCI_DEVICE_ID_LINCROFT         0x082a
-#define PCI_DEVICE_ID_PENWELL          0x080e
-#define PCI_DEVICE_ID_CLOVERVIEW       0x08ea
-#define PCI_DEVICE_ID_TANGIER          0x11a0
-
-/* intel scu ipc driver data */
-struct intel_scu_ipc_pdata_t {
-       u32 i2c_base;
-       u32 i2c_len;
-       u8 irq_mode;
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
-       .i2c_base = 0xff12b000,
-       .i2c_len = 0x10,
-       .irq_mode = 0,
-};
-
-/* Penwell and Cloverview */
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
-       .i2c_base = 0xff12b000,
-       .i2c_len = 0x10,
-       .irq_mode = 1,
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
-       .i2c_base  = 0xff00d000,
-       .i2c_len = 0x10,
-       .irq_mode = 0,
-};
-
 struct intel_scu_ipc_dev {
        struct device *dev;
        void __iomem *ipc_base;
-       void __iomem *i2c_base;
        struct completion cmd_complete;
        u8 irq_mode;
 };
 
 static struct intel_scu_ipc_dev  ipcdev; /* Only one for now */
 
+#define IPC_STATUS             0x04
+#define IPC_STATUS_IRQ         BIT(2)
+#define IPC_STATUS_ERR         BIT(1)
+#define IPC_STATUS_BUSY                BIT(0)
+
 /*
- * IPC Read Buffer (Read Only):
- * 16 byte buffer for receiving data from SCU, if IPC command
- * processing results in response data
+ * IPC Write/Read Buffers:
+ * 16-byte buffers for sending data to and receiving data from the SCU.
  */
+#define IPC_WRITE_BUFFER       0x80
 #define IPC_READ_BUFFER                0x90
 
-#define IPC_I2C_CNTRL_ADDR     0
-#define I2C_DATA_ADDR          0x04
+/* Timeout in jiffies */
+#define IPC_TIMEOUT            (3 * HZ)
 
 static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
 
@@ -120,11 +89,8 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
  */
 static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
 {
-       if (scu->irq_mode) {
-               reinit_completion(&scu->cmd_complete);
-               writel(cmd | IPC_IOC, scu->ipc_base);
-       }
-       writel(cmd, scu->ipc_base);
+       reinit_completion(&scu->cmd_complete);
+       writel(cmd | IPC_IOC, scu->ipc_base);
 }
 
 /*
@@ -135,7 +101,7 @@ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
  */
 static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
 {
-       writel(data, scu->ipc_base + 0x80 + offset);
+       writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
 }
 
 /*
@@ -147,7 +113,7 @@ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32
  */
 static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
 {
-       return __raw_readl(scu->ipc_base + 0x04);
+       return __raw_readl(scu->ipc_base + IPC_STATUS);
 }
 
 /* Read ipc byte data */
@@ -165,24 +131,20 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
 /* Wait until the SCU is no longer busy, or the timeout expires */
 static inline int busy_loop(struct intel_scu_ipc_dev *scu)
 {
-       u32 status = ipc_read_status(scu);
-       u32 loop_count = 100000;
+       unsigned long end = jiffies + IPC_TIMEOUT;
 
-       /* break if scu doesn't reset busy bit after huge retry */
-       while ((status & BIT(0)) && --loop_count) {
-               udelay(1); /* scu processing time is in few u secods */
-               status = ipc_read_status(scu);
-       }
+       do {
+               u32 status;
 
-       if (status & BIT(0)) {
-               dev_err(scu->dev, "IPC timed out");
-               return -ETIMEDOUT;
-       }
+               status = ipc_read_status(scu);
+               if (!(status & IPC_STATUS_BUSY))
+                       return (status & IPC_STATUS_ERR) ? -EIO : 0;
 
-       if (status & BIT(1))
-               return -EIO;
+               usleep_range(50, 100);
+       } while (time_before(jiffies, end));
 
-       return 0;
+       dev_err(scu->dev, "IPC timed out");
+       return -ETIMEDOUT;
 }
 
 /* Wait until the IPC IOC interrupt is received or IPC_TIMEOUT expires */
@@ -190,13 +152,13 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
 {
        int status;
 
-       if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) {
+       if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) {
                dev_err(scu->dev, "IPC timed out\n");
                return -ETIMEDOUT;
        }
 
        status = ipc_read_status(scu);
-       if (status & BIT(1))
+       if (status & IPC_STATUS_ERR)
                return -EIO;
 
        return 0;
@@ -260,14 +222,14 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
 }
 
 /**
- *     intel_scu_ipc_ioread8           -       read a word via the SCU
- *     @addr: register on SCU
- *     @data: return pointer for read byte
+ * intel_scu_ipc_ioread8               -       read a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Return pointer for read byte
  *
- *     Read a single register. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
+ * Read a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
  *
- *     This function may sleep.
+ * This function may sleep.
  */
 int intel_scu_ipc_ioread8(u16 addr, u8 *data)
 {
@@ -276,48 +238,14 @@ int intel_scu_ipc_ioread8(u16 addr, u8 *data)
 EXPORT_SYMBOL(intel_scu_ipc_ioread8);
 
 /**
- *     intel_scu_ipc_ioread16          -       read a word via the SCU
- *     @addr: register on SCU
- *     @data: return pointer for read word
- *
- *     Read a register pair. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
- *
- *     This function may sleep.
- */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data)
-{
-       u16 x[2] = {addr, addr + 1};
-       return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread16);
-
-/**
- *     intel_scu_ipc_ioread32          -       read a dword via the SCU
- *     @addr: register on SCU
- *     @data: return pointer for read dword
- *
- *     Read four registers. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
- *
- *     This function may sleep.
- */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data)
-{
-       u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
-       return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread32);
-
-/**
- *     intel_scu_ipc_iowrite8          -       write a byte via the SCU
- *     @addr: register on SCU
- *     @data: byte to write
+ * intel_scu_ipc_iowrite8              -       write a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Byte to write
  *
- *     Write a single register. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
+ * Write a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
  *
- *     This function may sleep.
+ * This function may sleep.
  */
 int intel_scu_ipc_iowrite8(u16 addr, u8 data)
 {
@@ -326,51 +254,17 @@ int intel_scu_ipc_iowrite8(u16 addr, u8 data)
 EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
 
 /**
- *     intel_scu_ipc_iowrite16         -       write a word via the SCU
- *     @addr: register on SCU
- *     @data: word to write
- *
- *     Write two registers. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_readv                 -       read a set of registers
+ * @addr: Register list
+ * @data: Bytes to return
+ * @len: Length of array
  *
- *     This function may sleep.
- */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data)
-{
-       u16 x[2] = {addr, addr + 1};
-       return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
-
-/**
- *     intel_scu_ipc_iowrite32         -       write a dword via the SCU
- *     @addr: register on SCU
- *     @data: dword to write
+ * Read registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
  *
- *     Write four registers. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
+ * The largest array length permitted by the hardware is 5 items.
  *
- *     This function may sleep.
- */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data)
-{
-       u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
-       return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
-
-/**
- *     intel_scu_ipc_readvv            -       read a set of registers
- *     @addr: register list
- *     @data: bytes to return
- *     @len: length of array
- *
- *     Read registers. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
- *
- *     The largest array length permitted by the hardware is 5 items.
- *
- *     This function may sleep.
+ * This function may sleep.
  */
 int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
 {
@@ -379,18 +273,17 @@ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
 EXPORT_SYMBOL(intel_scu_ipc_readv);
 
 /**
- *     intel_scu_ipc_writev            -       write a set of registers
- *     @addr: register list
- *     @data: bytes to write
- *     @len: length of array
- *
- *     Write registers. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_writev                -       write a set of registers
+ * @addr: Register list
+ * @data: Bytes to write
+ * @len: Length of array
  *
- *     The largest array length permitted by the hardware is 5 items.
+ * Write registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
  *
- *     This function may sleep.
+ * The largest array length permitted by the hardware is 5 items.
  *
+ * This function may sleep.
  */
 int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
 {
@@ -399,19 +292,18 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
 EXPORT_SYMBOL(intel_scu_ipc_writev);
 
 /**
- *     intel_scu_ipc_update_register   -       r/m/w a register
- *     @addr: register address
- *     @bits: bits to update
- *     @mask: mask of bits to update
- *
- *     Read-modify-write power control unit register. The first data argument
- *     must be register value and second is mask value
- *     mask is a bitmap that indicates which bits to update.
- *     0 = masked. Don't modify this bit, 1 = modify this bit.
- *     returns 0 on success or an error code.
- *
- *     This function may sleep. Locking between SCU accesses is handled
- *     for the caller.
+ * intel_scu_ipc_update_register       -       r/m/w a register
+ * @addr: Register address
+ * @bits: Bits to update
+ * @mask: Mask of bits to update
+ *
+ * Read-modify-write a power control unit register. @bits supplies the new
+ * bit values and @mask is a bitmap selecting which bits to update:
+ * %0 = masked (leave this bit unchanged), %1 = modify this bit.
+ * Returns %0 on success or an error code.
+ *
+ * This function may sleep. Locking between SCU accesses is handled
+ * for the caller.
  */
 int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
 {
@@ -421,16 +313,16 @@ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
 EXPORT_SYMBOL(intel_scu_ipc_update_register);
 
 /**
- *     intel_scu_ipc_simple_command    -       send a simple command
- *     @cmd: command
- *     @sub: sub type
+ * intel_scu_ipc_simple_command        -       send a simple command
+ * @cmd: Command
+ * @sub: Sub type
  *
- *     Issue a simple command to the SCU. Do not use this interface if
- *     you must then access data as any data values may be overwritten
- *     by another SCU access by the time this function returns.
+ * Issue a simple command to the SCU. Do not use this interface if you must
+ * then access data as any data values may be overwritten by another SCU
+ * access by the time this function returns.
  *
- *     This function may sleep. Locking for SCU accesses is handled for
- *     the caller.
+ * This function may sleep. Locking for SCU accesses is handled for the
+ * caller.
  */
 int intel_scu_ipc_simple_command(int cmd, int sub)
 {
@@ -450,16 +342,16 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
 EXPORT_SYMBOL(intel_scu_ipc_simple_command);
 
 /**
- *     intel_scu_ipc_command   -       command with data
- *     @cmd: command
- *     @sub: sub type
- *     @in: input data
- *     @inlen: input length in dwords
- *     @out: output data
- *     @outlein: output length in dwords
- *
- *     Issue a command to the SCU which involves data transfers. Do the
- *     data copies under the lock but leave it for the caller to interpret
+ * intel_scu_ipc_command       -       command with data
+ * @cmd: Command
+ * @sub: Sub type
+ * @in: Input data
+ * @inlen: Input length in dwords
+ * @out: Output data
+ * @outlen: Output length in dwords
+ *
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret.
  */
 int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
                          u32 *out, int outlen)
@@ -489,117 +381,6 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
 }
 EXPORT_SYMBOL(intel_scu_ipc_command);
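
For orientation, a minimal sketch of a consumer of the helpers documented
above; the addresses 0x30/0x31 and the bit tested are purely illustrative,
not a real PMIC register layout.

#include <linux/bits.h>
#include <asm/intel_scu_ipc.h>

/* Illustrative only: set bit 0 of a hypothetical register at 0x30 without
 * disturbing its other bits, then read two registers back in one SCU
 * transaction (the hardware caps readv/writev at 5 items).
 */
static int example_pmic_poke(void)
{
	u16 addrs[2] = { 0x30, 0x31 };
	u8 vals[2];
	int ret;

	ret = intel_scu_ipc_update_register(0x30, BIT(0), BIT(0));
	if (ret)
		return ret;

	ret = intel_scu_ipc_readv(addrs, vals, 2);
	if (ret)
		return ret;

	return (vals[0] & BIT(0)) ? 0 : -EIO;
}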
 
-#define IPC_SPTR               0x08
-#define IPC_DPTR               0x0C
-
-/**
- * intel_scu_ipc_raw_command() - IPC command with data and pointers
- * @cmd:       IPC command code.
- * @sub:       IPC command sub type.
- * @in:                input data of this IPC command.
- * @inlen:     input data length in dwords.
- * @out:       output data of this IPC command.
- * @outlen:    output data length in dwords.
- * @sptr:      data writing to SPTR register.
- * @dptr:      data writing to DPTR register.
- *
- * Send an IPC command to SCU with input/output data and source/dest pointers.
- *
- * Return:     an IPC error code or 0 on success.
- */
-int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
-                             u32 *out, int outlen, u32 dptr, u32 sptr)
-{
-       struct intel_scu_ipc_dev *scu = &ipcdev;
-       int inbuflen = DIV_ROUND_UP(inlen, 4);
-       u32 inbuf[4];
-       int i, err;
-
-       /* Up to 16 bytes */
-       if (inbuflen > 4)
-               return -EINVAL;
-
-       mutex_lock(&ipclock);
-       if (scu->dev == NULL) {
-               mutex_unlock(&ipclock);
-               return -ENODEV;
-       }
-
-       writel(dptr, scu->ipc_base + IPC_DPTR);
-       writel(sptr, scu->ipc_base + IPC_SPTR);
-
-       /*
-        * SRAM controller doesn't support 8-bit writes, it only
-        * supports 32-bit writes, so we have to copy input data into
-        * the temporary buffer, and SCU FW will use the inlen to
-        * determine the actual input data length in the temporary
-        * buffer.
-        */
-       memcpy(inbuf, in, inlen);
-
-       for (i = 0; i < inbuflen; i++)
-               ipc_data_writel(scu, inbuf[i], 4 * i);
-
-       ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
-       err = intel_scu_ipc_check_status(scu);
-       if (!err) {
-               for (i = 0; i < outlen; i++)
-                       *out++ = ipc_data_readl(scu, 4 * i);
-       }
-
-       mutex_unlock(&ipclock);
-       return err;
-}
-EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command);
-
-/* I2C commands */
-#define IPC_I2C_WRITE 1 /* I2C Write command */
-#define IPC_I2C_READ  2 /* I2C Read command */
-
-/**
- *     intel_scu_ipc_i2c_cntrl         -       I2C read/write operations
- *     @addr: I2C address + command bits
- *     @data: data to read/write
- *
- *     Perform an an I2C read/write operation via the SCU. All locking is
- *     handled for the caller. This function may sleep.
- *
- *     Returns an error code or 0 on success.
- *
- *     This has to be in the IPC driver for the locking.
- */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
-{
-       struct intel_scu_ipc_dev *scu = &ipcdev;
-       u32 cmd = 0;
-
-       mutex_lock(&ipclock);
-       if (scu->dev == NULL) {
-               mutex_unlock(&ipclock);
-               return -ENODEV;
-       }
-       cmd = (addr >> 24) & 0xFF;
-       if (cmd == IPC_I2C_READ) {
-               writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
-               /* Write not getting updated without delay */
-               usleep_range(1000, 2000);
-               *data = readl(scu->i2c_base + I2C_DATA_ADDR);
-       } else if (cmd == IPC_I2C_WRITE) {
-               writel(*data, scu->i2c_base + I2C_DATA_ADDR);
-               usleep_range(1000, 2000);
-               writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
-       } else {
-               dev_err(scu->dev,
-                       "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
-
-               mutex_unlock(&ipclock);
-               return -EIO;
-       }
-       mutex_unlock(&ipclock);
-       return 0;
-}
-EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
-
 /*
  * The interrupt handler is called when the ioc bit of IPC_COMMAND_REG is set to 1.
  * When the ioc bit is set to 1, the caller API must wait for the interrupt handler to be called
@@ -610,9 +391,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
 static irqreturn_t ioc(int irq, void *dev_id)
 {
        struct intel_scu_ipc_dev *scu = dev_id;
+       int status = ipc_read_status(scu);
 
-       if (scu->irq_mode)
-               complete(&scu->cmd_complete);
+       writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
+       complete(&scu->cmd_complete);
 
        return IRQ_HANDLED;
 }
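
With the irq_mode plumbing gone, the handler above always acks the interrupt
(write-one-to-clear via IPC_STATUS_IRQ) before signalling the completion. A
hedged sketch of the matching wait on the command-issuing side; the helper
name and the 3-second timeout are assumptions, not the driver's exact code.

static int scu_ipc_wait_for_irq(struct intel_scu_ipc_dev *scu)
{
	/* Sleep until ioc() above signals cmd_complete. */
	if (!wait_for_completion_timeout(&scu->cmd_complete,
					 msecs_to_jiffies(3000)))
		return -ETIMEDOUT;

	return 0;
}
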
@@ -629,17 +411,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int err;
        struct intel_scu_ipc_dev *scu = &ipcdev;
-       struct intel_scu_ipc_pdata_t *pdata;
 
        if (scu->dev)           /* We support only one SCU */
                return -EBUSY;
 
-       pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
-       if (!pdata)
-               return -ENODEV;
-
-       scu->irq_mode = pdata->irq_mode;
-
        err = pcim_enable_device(pdev);
        if (err)
                return err;
@@ -652,10 +427,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        scu->ipc_base = pcim_iomap_table(pdev)[0];
 
-       scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
-       if (!scu->i2c_base)
-               return -ENOMEM;
-
        err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
                               scu);
        if (err)
@@ -670,13 +441,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return 0;
 }
 
-#define SCU_DEVICE(id, pdata)  {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata}
-
 static const struct pci_device_id pci_ids[] = {
-       SCU_DEVICE(PCI_DEVICE_ID_LINCROFT,      intel_scu_ipc_lincroft_pdata),
-       SCU_DEVICE(PCI_DEVICE_ID_PENWELL,       intel_scu_ipc_penwell_pdata),
-       SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW,    intel_scu_ipc_penwell_pdata),
-       SCU_DEVICE(PCI_DEVICE_ID_TANGIER,       intel_scu_ipc_tangier_pdata),
+       { PCI_VDEVICE(INTEL, 0x080e) },
+       { PCI_VDEVICE(INTEL, 0x08ea) },
+       { PCI_VDEVICE(INTEL, 0x11a0) },
        {}
 };
 
index 3de5a3c..0c2aa22 100644 (file)
@@ -50,6 +50,8 @@ static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
        {0x7F, 0x00, 0x0B},
        {0x7F, 0x10, 0x12},
        {0x7F, 0x20, 0x23},
+       {0x94, 0x03, 0x03},
+       {0x95, 0x03, 0x03},
 };
 
 static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
@@ -59,6 +61,7 @@ static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
        {0xD0, 0x03, 0x08},
        {0x7F, 0x02, 0x00},
        {0x7F, 0x08, 0x00},
+       {0x95, 0x03, 0x03},
 };
 
 struct isst_cmd {
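
The two hunks extend an allowlist of {command, sub-command range} triples. A
self-contained sketch of the range check such a table implies; the struct and
helper names here are hypothetical, mirroring isst_valid_cmd_ranges.

#include <linux/types.h>

struct cmd_range {
	u16 cmd;
	u16 sub_start;
	u16 sub_end;
};

/* Return true when (cmd, sub) falls inside one of the allowed ranges. */
static bool cmd_allowed(const struct cmd_range *tbl, int n, u16 cmd, u16 sub)
{
	int i;

	for (i = 0; i < n; i++) {
		if (tbl[i].cmd == cmd &&
		    sub >= tbl[i].sub_start && sub <= tbl[i].sub_end)
			return true;
	}
	return false;
}
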
index e84d3e9..8e3fb55 100644 (file)
@@ -686,13 +686,14 @@ static ssize_t telem_pss_trc_verb_write(struct file *file,
        u32 verbosity;
        int err;
 
-       if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
-               return -EFAULT;
+       err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+       if (err)
+               return err;
 
        err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity);
        if (err) {
                pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err);
-               count = err;
+               return err;
        }
 
        return count;
@@ -733,13 +734,14 @@ static ssize_t telem_ioss_trc_verb_write(struct file *file,
        u32 verbosity;
        int err;
 
-       if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
-               return -EFAULT;
+       err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+       if (err)
+               return err;
 
        err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity);
        if (err) {
                pr_err("Changing IOSS Trace Verbosity Failed. Error %d\n", err);
-               count = err;
+               return err;
        }
 
        return count;
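
Both hunks apply the same fix: the kstrtou32_from_user() error is propagated
as-is instead of being flattened to -EFAULT, and a failing telemetry call
returns its error instead of overwriting count. Distilled into a generic
sketch, with set_verbosity() standing in for telemetry_set_trace_verbosity():

static int set_verbosity(u32 verbosity);	/* stand-in, hypothetical */

static ssize_t trc_verb_write(struct file *file, const char __user *userbuf,
			      size_t count, loff_t *ppos)
{
	u32 verbosity;
	int err;

	/* Propagates -EINVAL/-ERANGE/-EFAULT from the parser unchanged. */
	err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
	if (err)
		return err;

	err = set_verbosity(verbosity);
	if (err)
		return err;	/* never fold the error into count */

	return count;
}
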
index df8565b..c4c742b 100644 (file)
@@ -1117,9 +1117,9 @@ static const struct telemetry_core_ops telm_pltops = {
 
 static int telemetry_pltdrv_probe(struct platform_device *pdev)
 {
-       struct resource *res0 = NULL, *res1 = NULL;
        const struct x86_cpu_id *id;
-       int size, ret = -ENOMEM;
+       void __iomem *mem;
+       int ret;
 
        id = x86_match_cpu(telemetry_cpu_ids);
        if (!id)
@@ -1127,50 +1127,17 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
 
        telm_conf = (struct telemetry_plt_config *)id->driver_data;
 
-       res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res0) {
-               ret = -EINVAL;
-               goto out;
-       }
-       size = resource_size(res0);
-       if (!devm_request_mem_region(&pdev->dev, res0->start, size,
-                                    pdev->name)) {
-               ret = -EBUSY;
-               goto out;
-       }
-       telm_conf->pss_config.ssram_base_addr = res0->start;
-       telm_conf->pss_config.ssram_size = size;
+       mem = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
 
-       res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res1) {
-               ret = -EINVAL;
-               goto out;
-       }
-       size = resource_size(res1);
-       if (!devm_request_mem_region(&pdev->dev, res1->start, size,
-                                    pdev->name)) {
-               ret = -EBUSY;
-               goto out;
-       }
+       telm_conf->pss_config.regmap = mem;
 
-       telm_conf->ioss_config.ssram_base_addr = res1->start;
-       telm_conf->ioss_config.ssram_size = size;
+       mem = devm_platform_ioremap_resource(pdev, 1);
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
 
-       telm_conf->pss_config.regmap = ioremap_nocache(
-                                       telm_conf->pss_config.ssram_base_addr,
-                                       telm_conf->pss_config.ssram_size);
-       if (!telm_conf->pss_config.regmap) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       telm_conf->ioss_config.regmap = ioremap_nocache(
-                               telm_conf->ioss_config.ssram_base_addr,
-                               telm_conf->ioss_config.ssram_size);
-       if (!telm_conf->ioss_config.regmap) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       telm_conf->ioss_config.regmap = mem;
 
        mutex_init(&telm_conf->telem_lock);
        mutex_init(&telm_conf->telem_trace_lock);
@@ -1188,14 +1155,6 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
        return 0;
 
 out:
-       if (res0)
-               release_mem_region(res0->start, resource_size(res0));
-       if (res1)
-               release_mem_region(res1->start, resource_size(res1));
-       if (telm_conf->pss_config.regmap)
-               iounmap(telm_conf->pss_config.regmap);
-       if (telm_conf->ioss_config.regmap)
-               iounmap(telm_conf->ioss_config.regmap);
        dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
 
        return ret;
@@ -1204,9 +1163,6 @@ out:
 static int telemetry_pltdrv_remove(struct platform_device *pdev)
 {
        telemetry_clear_pltdata();
-       iounmap(telm_conf->pss_config.regmap);
-       iounmap(telm_conf->ioss_config.regmap);
-
        return 0;
 }
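
devm_platform_ioremap_resource() collapses the get-resource, request-region
and ioremap steps the old probe open-coded, and since the mapping is
device-managed, both the error path and remove() lose their unmap/release
calls. It is roughly equivalent to this sketch (not the exact kernel
internals):

static void __iomem *mapped_resource(struct platform_device *pdev,
				     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	/* devm_ioremap_resource() requests the region, maps it and
	 * returns an ERR_PTR() on failure (including res == NULL).
	 */
	return devm_ioremap_resource(&pdev->dev, res);
}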
 
index 8fe51e4..c27548f 100644 (file)
@@ -35,6 +35,8 @@
 #define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET       0x23
 #define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET       0x24
 #define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION     0x2a
+#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET     0x2b
+#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET                0x2e
 #define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET                0x30
 #define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET                0x31
 #define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET                0x32
@@ -46,6 +48,8 @@
 #define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET        0x41
 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET     0x42
 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET        0x43
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET     0x44
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
 #define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
 #define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
 #define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET  0x52
@@ -68,6 +72,7 @@
 #define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET    0xd1
 #define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET  0xd2
 #define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET    0xd3
+#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET        0xe2
 #define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET       0xe3
 #define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET     0xe4
 #define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET     0xe5
 #define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET   0xf6
 #define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET        0xf7
 #define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET        0xf8
+#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9
+#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET    0xfb
+#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET    0xfc
 #define MLXPLAT_CPLD_LPC_IO_RANGE              0x100
 #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF           0xdb
 #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF           0xda
+#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF           0xdc
 
 #define MLXPLAT_CPLD_LPC_PIO_OFFSET            0x10000UL
 #define MLXPLAT_CPLD_LPC_REG1  ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
 #define MLXPLAT_CPLD_LPC_REG2  ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
                                  MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
                                  MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG3  ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+                                 MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \
+                                 MLXPLAT_CPLD_LPC_PIO_OFFSET)
 
 /* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */
 #define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF        0x04
 #define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
 #define MLXPLAT_CPLD_PSU_MASK          GENMASK(1, 0)
 #define MLXPLAT_CPLD_PWR_MASK          GENMASK(1, 0)
+#define MLXPLAT_CPLD_PSU_EXT_MASK      GENMASK(3, 0)
+#define MLXPLAT_CPLD_PWR_EXT_MASK      GENMASK(3, 0)
 #define MLXPLAT_CPLD_FAN_MASK          GENMASK(3, 0)
 #define MLXPLAT_CPLD_ASIC_MASK         GENMASK(1, 0)
 #define MLXPLAT_CPLD_FAN_NG_MASK       GENMASK(5, 0)
 #define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK        GENMASK(7, 4)
 #define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK        GENMASK(3, 0)
+#define MLXPLAT_CPLD_VOLTREG_UPD_MASK  GENMASK(5, 4)
+#define MLXPLAT_CPLD_I2C_CAP_BIT       0x04
+#define MLXPLAT_CPLD_I2C_CAP_MASK      GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
+
+/* Masks for aggregation for comex carriers */
+#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1)
+#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF        (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
+                                        MLXPLAT_CPLD_AGGR_MASK_CARRIER)
+#define MLXPLAT_CPLD_LOW_AGGRCX_MASK   0xc1
 
 /* Default I2C parent bus number */
 #define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR       1
 
 /* Maximum number of possible physical buses equipped on system */
 #define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM      16
+#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM  24
 
 /* Number of channels in group */
 #define MLXPLAT_CPLD_GRP_CHNL_NUM              8
 /* Start channel numbers */
 #define MLXPLAT_CPLD_CH1                       2
 #define MLXPLAT_CPLD_CH2                       10
+#define MLXPLAT_CPLD_CH3                       18
 
 /* Number of LPC attached MUX platform devices */
-#define MLXPLAT_CPLD_LPC_MUX_DEVS              2
+#define MLXPLAT_CPLD_LPC_MUX_DEVS              3
 
 /* Hotplug devices adapter numbers */
 #define MLXPLAT_CPLD_NR_NONE                   -1
 #define MLXPLAT_CPLD_PSU_DEFAULT_NR            10
 #define MLXPLAT_CPLD_PSU_MSNXXXX_NR            4
+#define MLXPLAT_CPLD_PSU_MSNXXXX_NR2           3
 #define MLXPLAT_CPLD_FAN1_DEFAULT_NR           11
 #define MLXPLAT_CPLD_FAN2_DEFAULT_NR           12
 #define MLXPLAT_CPLD_FAN3_DEFAULT_NR           13
@@ -187,8 +213,24 @@ static const struct resource mlxplat_lpc_resources[] = {
                               IORESOURCE_IO),
 };
 
+/* Platform i2c data for next generation systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = {
+       {
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+               .mask = MLXPLAT_CPLD_I2C_CAP_MASK,
+               .bit = MLXPLAT_CPLD_I2C_CAP_BIT,
+       },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = {
+       {
+               .data = mlxplat_mlxcpld_i2c_ng_items_data,
+       },
+};
+
 /* Platform next generation systems i2c data */
 static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = {
+       .items = mlxplat_mlxcpld_i2c_ng_items,
        .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
        .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
        .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET,
@@ -213,7 +255,7 @@ static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = {
 static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
 
 /* Platform mux data */
-static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
+static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = {
        {
                .parent = 1,
                .base_nr = MLXPLAT_CPLD_CH1,
@@ -233,6 +275,40 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
 
 };
 
+/* Platform mux configuration variables */
+static int mlxplat_max_adap_num;
+static int mlxplat_mux_num;
+static struct i2c_mux_reg_platform_data *mlxplat_mux_data;
+
+/* Platform extended mux data */
+static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = {
+       {
+               .parent = 1,
+               .base_nr = MLXPLAT_CPLD_CH1,
+               .write_only = 1,
+               .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+               .reg_size = 1,
+               .idle_in_use = 1,
+       },
+       {
+               .parent = 1,
+               .base_nr = MLXPLAT_CPLD_CH2,
+               .write_only = 1,
+               .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
+               .reg_size = 1,
+               .idle_in_use = 1,
+       },
+       {
+               .parent = 1,
+               .base_nr = MLXPLAT_CPLD_CH3,
+               .write_only = 1,
+               .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+               .reg_size = 1,
+               .idle_in_use = 1,
+       },
+
+};
+
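For context on how these entries are consumed: each one is later handed
verbatim to an "i2c-mux-reg" platform device, whose driver writes the
selected channel value into the one-byte LPC register (write_only, parking
on the last channel when idle_in_use is set). A sketch mirroring the
registration loop further down in this diff:

	priv->pdev_mux[i] = platform_device_register_resndata(
					&priv->pdev_i2c->dev, "i2c-mux-reg",
					i, NULL, 0,
					&mlxplat_extended_mux_data[i],
					sizeof(mlxplat_extended_mux_data[i]));
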
 /* Platform hotplug devices */
 static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
        {
@@ -276,6 +352,22 @@ static struct i2c_board_info mlxplat_mlxcpld_fan[] = {
        },
 };
 
+/* Platform hotplug comex carrier system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = {
+       {
+               .label = "psu1",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(0),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+       {
+               .label = "psu2",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(1),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+};
+
 /* Platform hotplug default data */
 static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
        {
@@ -390,6 +482,45 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
        },
 };
 
+static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = {
+       {
+               .data = mlxplat_mlxcpld_comex_psu_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = MLXPLAT_CPLD_PSU_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_psu),
+               .inversed = 1,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_default_pwr_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = MLXPLAT_CPLD_PWR_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr),
+               .inversed = 0,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_default_fan_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+               .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+               .mask = MLXPLAT_CPLD_FAN_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_fan),
+               .inversed = 1,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_default_asic_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+               .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+               .mask = MLXPLAT_CPLD_ASIC_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+               .inversed = 0,
+               .health = true,
+       },
+};
+
 static
 struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
        .items = mlxplat_mlxcpld_default_items,
@@ -400,6 +531,16 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
        .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
 };
 
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = {
+       .items = mlxplat_mlxcpld_comex_items,
+       .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items),
+       .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+       .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF,
+       .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET,
+       .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK,
+};
+
 static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = {
        {
                .label = "pwr1",
@@ -723,6 +864,116 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = {
        .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
 };
 
+/* Platform hotplug extended system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = {
+       {
+               .label = "psu1",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(0),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+       {
+               .label = "psu2",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(1),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+       {
+               .label = "psu3",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(2),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+       {
+               .label = "psu4",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(3),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = {
+       {
+               .label = "pwr1",
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = BIT(0),
+               .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+               .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+       },
+       {
+               .label = "pwr2",
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = BIT(1),
+               .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+               .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+       },
+       {
+               .label = "pwr3",
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = BIT(2),
+               .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+               .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+       },
+       {
+               .label = "pwr4",
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = BIT(3),
+               .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+               .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+       },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
+       {
+               .data = mlxplat_mlxcpld_ext_psu_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+               .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+               .inversed = 1,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_ext_pwr_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+               .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
+               .inversed = 0,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+               .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+               .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+               .inversed = 1,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_default_asic_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+               .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+               .mask = MLXPLAT_CPLD_ASIC_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+               .inversed = 0,
+               .health = true,
+       },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
+       .items = mlxplat_mlxcpld_ext_items,
+       .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items),
+       .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+       .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+       .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+       .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
 /* Platform led default data */
 static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
        {
@@ -964,6 +1215,80 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = {
                .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data),
 };
 
+/* Platform LED data for Comex-based 100GbE systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = {
+       {
+               .label = "status:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "status:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "psu:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "psu:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "fan1:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "fan1:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "fan2:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "fan2:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "fan3:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "fan3:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "fan4:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "fan4:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "uid:blue",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+};
+
+static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = {
+               .data = mlxplat_mlxcpld_comex_100G_led_data,
+               .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data),
+};
+
 /* Platform register access default */
 static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
        {
@@ -1156,6 +1481,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = {
                .mask = GENMASK(7, 0) & ~BIT(3),
                .mode = 0200,
        },
+       {
+               .label = "select_iio",
+               .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(6),
+               .mode = 0644,
+       },
        {
                .label = "asic_health",
                .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
@@ -1244,6 +1575,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
                .mask = GENMASK(7, 0) & ~BIT(3),
                .mode = 0444,
        },
+       {
+               .label = "reset_platform",
+               .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(4),
+               .mode = 0444,
+       },
+       {
+               .label = "reset_soc",
+               .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(5),
+               .mode = 0444,
+       },
        {
                .label = "reset_comex_wd",
                .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
@@ -1262,6 +1605,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
                .mask = GENMASK(7, 0) & ~BIT(1),
                .mode = 0444,
        },
+       {
+               .label = "reset_sw_pwr_off",
+               .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(2),
+               .mode = 0444,
+       },
        {
                .label = "reset_comex_thermal",
                .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
@@ -1274,6 +1623,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
                .mask = GENMASK(7, 0) & ~BIT(5),
                .mode = 0444,
        },
+       {
+               .label = "reset_ac_pwr_fail",
+               .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(6),
+               .mode = 0444,
+       },
        {
                .label = "psu1_on",
                .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
@@ -1317,6 +1672,43 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
                .bit = GENMASK(7, 0),
                .mode = 0444,
        },
+       {
+               .label = "voltreg_update_status",
+               .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+               .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+               .bit = 5,
+               .mode = 0444,
+       },
+       {
+               .label = "vpd_wp",
+               .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(3),
+               .mode = 0644,
+       },
+       {
+               .label = "pcie_asic_reset_dis",
+               .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(4),
+               .mode = 0644,
+       },
+       {
+               .label = "config1",
+               .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+               .bit = GENMASK(7, 0),
+               .mode = 0444,
+       },
+       {
+               .label = "config2",
+               .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+               .bit = GENMASK(7, 0),
+               .mode = 0444,
+       },
+       {
+               .label = "ufm_version",
+               .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+               .bit = GENMASK(7, 0),
+               .mode = 0444,
+       },
 };
 
 static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
@@ -1575,6 +1967,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1582,6 +1975,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
@@ -1621,6 +2015,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+       case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1631,6 +2027,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -1671,6 +2069,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
                return true;
        }
        return false;
@@ -1692,6 +2094,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+       case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
@@ -1700,6 +2104,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -1734,6 +2140,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
                return true;
        }
        return false;
@@ -1751,6 +2161,19 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = {
        { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
 };
 
+static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = {
+       { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET,
+         MLXPLAT_CPLD_LOW_AGGRCX_MASK },
+       { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+};
+
+static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = {
+       { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+       { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+       { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+       { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+};
+
 struct mlxplat_mlxcpld_regmap_context {
        void __iomem *base;
 };
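
reg_default tables such as mlxplat_mlxcpld_regmap_ng400 pre-seed the
REGCACHE_FLAT cache when the regmap is created. A hedged usage sketch; note
that whether a read is actually served from the cache depends on the
volatile_reg() callback above:

static int example_read_wd1_act(struct regmap *regmap)
{
	unsigned int val;
	int err;

	err = regmap_read(regmap, MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, &val);
	if (err)
		return err;

	return val;	/* 0x00 right after init, per the default above */
}
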
@@ -1803,6 +2226,34 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = {
        .reg_write = mlxplat_mlxcpld_reg_write,
 };
 
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 255,
+       .cache_type = REGCACHE_FLAT,
+       .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+       .readable_reg = mlxplat_mlxcpld_readable_reg,
+       .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+       .reg_defaults = mlxplat_mlxcpld_regmap_comex_default,
+       .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default),
+       .reg_read = mlxplat_mlxcpld_reg_read,
+       .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 255,
+       .cache_type = REGCACHE_FLAT,
+       .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+       .readable_reg = mlxplat_mlxcpld_readable_reg,
+       .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+       .reg_defaults = mlxplat_mlxcpld_regmap_ng400,
+       .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400),
+       .reg_read = mlxplat_mlxcpld_reg_read,
+       .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
 static struct resource mlxplat_mlxcpld_resources[] = {
        [0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"),
 };
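
Because these configs supply .reg_read/.reg_write callbacks, the regmap is
created with the bus-less initializer. A sketch of the registration this
implies; the context instance name is assumed, and the exact call site is
not part of this hunk:

	priv->regmap = devm_regmap_init(&pdev->dev, NULL,
					&mlxplat_mlxcpld_regmap_ctx,
					mlxplat_regmap_config);
	if (IS_ERR(priv->regmap))
		return PTR_ERR(priv->regmap);
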
@@ -1821,7 +2272,10 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
                mlxplat_mux_data[i].values = mlxplat_default_channels[i];
                mlxplat_mux_data[i].n_values =
                                ARRAY_SIZE(mlxplat_default_channels[i]);
@@ -1834,13 +2288,16 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
        mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
 
        return 1;
-};
+}
 
 static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
                mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
                mlxplat_mux_data[i].n_values =
                                ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1853,13 +2310,16 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
        mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
 
        return 1;
-};
+}
 
 static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
                mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
                mlxplat_mux_data[i].n_values =
                                ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1872,13 +2332,16 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
        mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
 
        return 1;
-};
+}
 
 static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
                mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
                mlxplat_mux_data[i].n_values =
                                ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1891,13 +2354,16 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
        mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
 
        return 1;
-};
+}
 
 static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
                mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
                mlxplat_mux_data[i].n_values =
                                ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1914,7 +2380,57 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
        mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
 
        return 1;
-};
+}
+
+static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
+{
+       int i;
+
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data);
+       mlxplat_mux_data = mlxplat_extended_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
+               mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+               mlxplat_mux_data[i].n_values =
+                               ARRAY_SIZE(mlxplat_msn21xx_channels);
+       }
+       mlxplat_hotplug = &mlxplat_mlxcpld_comex_data;
+       mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+       mlxplat_led = &mlxplat_comex_100G_led_data;
+       mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+       mlxplat_fan = &mlxplat_default_fan_data;
+       for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+               mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+       mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex;
+
+       return 1;
+}
+
+static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
+{
+       int i;
+
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
+               mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+               mlxplat_mux_data[i].n_values =
+                               ARRAY_SIZE(mlxplat_msn21xx_channels);
+       }
+       mlxplat_hotplug = &mlxplat_mlxcpld_ext_data;
+       mlxplat_hotplug->deferred_nr =
+               mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+       mlxplat_led = &mlxplat_default_ng_led_data;
+       mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+       mlxplat_fan = &mlxplat_default_fan_data;
+       for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+               mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+       mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+       mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+       return 1;
+}
 
 static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
        {
@@ -1953,6 +2469,18 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_BOARD_NAME, "VMOD0007"),
                },
        },
+       {
+               .callback = mlxplat_dmi_comex_matched,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"),
+               },
+       },
+       {
+               .callback = mlxplat_dmi_ng400_matched,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
+               },
+       },
        {
                .callback = mlxplat_dmi_msn274x_matched,
                .matches = {
@@ -2043,7 +2571,7 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
        /* Scan adapters from expected id to verify it is free. */
        *nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
        for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i <
-            MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; i++) {
+            mlxplat_max_adap_num; i++) {
                search_adap = i2c_get_adapter(i);
                if (search_adap) {
                        i2c_put_adapter(search_adap);
@@ -2057,12 +2585,12 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
        }
 
        /* Return with error if free id for adapter is not found. */
-       if (i == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM)
+       if (i == mlxplat_max_adap_num)
                return -ENODEV;
 
        /* Shift adapter ids, since expected parent adapter is not free. */
        *nr = i;
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       for (i = 0; i < mlxplat_mux_num; i++) {
                shift = *nr - mlxplat_mux_data[i].parent;
                mlxplat_mux_data[i].parent = *nr;
                mlxplat_mux_data[i].base_nr += shift;
@@ -2118,7 +2646,7 @@ static int __init mlxplat_init(void)
        if (nr < 0)
                goto fail_alloc;
 
-       nr = (nr == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) ? -1 : nr;
+       nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
        if (mlxplat_i2c)
                mlxplat_i2c->regmap = priv->regmap;
        priv->pdev_i2c = platform_device_register_resndata(
@@ -2131,7 +2659,7 @@ static int __init mlxplat_init(void)
                goto fail_alloc;
        }
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       for (i = 0; i < mlxplat_mux_num; i++) {
                priv->pdev_mux[i] = platform_device_register_resndata(
                                                &priv->pdev_i2c->dev,
                                                "i2c-mux-reg", i, NULL,
@@ -2265,7 +2793,7 @@ static void __exit mlxplat_exit(void)
        platform_device_unregister(priv->pdev_led);
        platform_device_unregister(priv->pdev_hotplug);
 
-       for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--)
+       for (i = mlxplat_mux_num - 1; i >= 0 ; i--)
                platform_device_unregister(priv->pdev_mux[i]);
 
        platform_device_unregister(priv->pdev_i2c);
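
All the *_matched() callbacks above share one shape: fill the mux/adapter
globals for the detected board, then return 1 so DMI scanning stops at the
first hit. A hedged sketch of how module init consumes the table, mirroring
the start of mlxplat_init() (only partly visible in this diff):

static int __init example_init(void)
{
	/*
	 * dmi_check_system() walks mlxplat_dmi_table, invokes the callback
	 * of each matching entry and returns the number of matches; a
	 * callback returning non-zero ends the scan.
	 */
	if (!dmi_check_system(mlxplat_dmi_table))
		return -ENODEV;

	/* Globals such as mlxplat_mux_data and mlxplat_mux_num are now set. */
	return 0;
}
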
index 7220577..93177e6 100644 (file)
@@ -219,8 +219,7 @@ static const struct property_entry digma_citi_e200_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1686-digma_citi_e200.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -236,8 +235,7 @@ static const struct property_entry gp_electronic_t701_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-gp-electronic-t701.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"),
        { }
 };
 
@@ -382,8 +380,7 @@ static const struct property_entry onda_v80_plus_v3_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1698),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3676-onda-v80-plus-v3.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -398,8 +395,7 @@ static const struct property_entry onda_v820w_32g_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-onda-v820w-32g.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -415,8 +411,7 @@ static const struct property_entry onda_v891w_v1_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-y",  8),
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3680-onda-v891w-v1.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -433,8 +428,7 @@ static const struct property_entry onda_v891w_v3_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1625),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3676-onda-v891w-v3.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -450,8 +444,7 @@ static const struct property_entry pipo_w2s_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-pipo-w2s.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"),
        { }
 };
 
@@ -460,14 +453,29 @@ static const struct ts_dmi_data pipo_w2s_data = {
        .properties     = pipo_w2s_props,
 };
 
+static const struct property_entry pipo_w11_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct ts_dmi_data pipo_w11_data = {
+       .acpi_name      = "MSSL1680:00",
+       .properties     = pipo_w11_props,
+};
+
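An entry like pipo_w11_data pairs a DMI match (made elsewhere in the table)
with a property list plus the ACPI device name to attach it to. A hedged
sketch of the attach step the ts_dmi core performs once a device named
"MSSL1680:00" appears; device_add_properties() was the mechanism in this
kernel generation:

static int example_attach(struct device *dev)
{
	/* Lets the silead driver read touchscreen-size-x and friends via
	 * device_property_read_u32() on its own device.
	 */
	return device_add_properties(dev, pipo_w11_data.properties);
}
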
 static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1692),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -484,8 +492,7 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1794),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3692-pov-mobii-wintab-p800w.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -502,8 +509,7 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -520,8 +526,7 @@ static const struct property_entry schneider_sct101ctm_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-schneider-sct101ctm.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -551,8 +556,7 @@ static const struct property_entry teclast_x98plus2_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1686-teclast_x98plus2.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
@@ -566,8 +570,7 @@ static const struct property_entry trekstor_primebook_c11_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1970),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-trekstor-primebook-c11.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -581,8 +584,7 @@ static const struct ts_dmi_data trekstor_primebook_c11_data = {
 static const struct property_entry trekstor_primebook_c13_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-trekstor-primebook-c13.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -596,8 +598,7 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = {
 static const struct property_entry trekstor_primetab_t13b_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-trekstor-primetab-t13b.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
@@ -613,8 +614,7 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1900),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
        PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3670-surftab-twin-10-1-st10432-8.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
@@ -629,8 +629,7 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
        PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1686-surftab-wintron70-st70416-6.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -909,6 +908,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "W2S"),
                },
        },
+       {
+               /* Pipo W11 */
+               .driver_data = (void *)&pipo_w11_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+                       /* Above matches are too generic, add bios-ver match */
+                       DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"),
+               },
+       },
        {
                /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
                .driver_data = (void *)&trekstor_surftab_wintron70_data,
@@ -1032,8 +1041,7 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
                .driver_data = (void *)&trekstor_surftab_wintron70_data,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
-                       DMI_MATCH(DMI_PRODUCT_NAME,
-                                            "SurfTab wintron 7.0 ST70416-6"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"),
                        /* Exact match, different versions need different fw */
                        DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
                },
@@ -1065,7 +1073,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
 }
 
 static int ts_dmi_notifier_call(struct notifier_block *nb,
-                                      unsigned long action, void *data)
+                               unsigned long action, void *data)
 {
        struct device *dev = data;
        struct i2c_client *client;
index 8ef150d..b607958 100644 (file)
@@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
                return SCSI_MLQUEUE_HOST_BUSY;
 
+       if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
+               return SCSI_MLQUEUE_HOST_BUSY;
+
        rport = starget_to_rport(scsi_target(sc->device));
        if (!rport) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
index 65ce10c..902b649 100644 (file)
@@ -2958,15 +2958,16 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
                q->limits.zoned = BLK_ZONED_HM;
        } else {
                sdkp->zoned = (buffer[8] >> 4) & 3;
-               if (sdkp->zoned == 1)
+               if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) {
                        /* Host-aware */
                        q->limits.zoned = BLK_ZONED_HA;
-               else
+               } else {
                        /*
-                        * Treat drive-managed devices as
-                        * regular block devices.
+                        * Treat drive-managed devices and host-aware devices
+                        * with partitions as regular block devices.
                         */
                        q->limits.zoned = BLK_ZONED_NONE;
+               }
        }
        if (blk_queue_is_zoned(q) && sdkp->first_scan)
                sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
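For reference, the zoned-model selection that results from this hunk, summarized (editorial sketch; the host-managed assignment sits in the branch just above the visible context):

	device type is ZBC                          -> BLK_ZONED_HM
	zoned == 1 and the disk has no partitions   -> BLK_ZONED_HA
	otherwise (drive-managed, or host-aware
	with partitions)                            -> BLK_ZONED_NONE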
index cf545f4..4486e05 100644 (file)
@@ -80,6 +80,17 @@ config TI_SCI_PM_DOMAINS
          called ti_sci_pm_domains. Note this is needed early in boot before
          rootfs may be available.
 
+config TI_K3_RINGACC
+       bool "K3 Ring accelerator Sub System"
+       depends on ARCH_K3 || COMPILE_TEST
+       depends on TI_SCI_INTA_IRQCHIP
+       help
+         Say y here to support the K3 Ring accelerator module.
+         The Ring Accelerator (RINGACC or RA) provides hardware acceleration
+         to enable straightforward passing of work between a producer
+         and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs.
+         If unsure, say N.
+
 endif # SOC_TI
 
 config TI_SCI_INTA_MSI_DOMAIN
index 788b5cd..bec8279 100644 (file)
@@ -10,3 +10,4 @@ obj-$(CONFIG_ARCH_OMAP2PLUS)          += omap_prm.o
 obj-$(CONFIG_WKUP_M3_IPC)              += wkup_m3_ipc.o
 obj-$(CONFIG_TI_SCI_PM_DOMAINS)                += ti_sci_pm_domains.o
 obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN)   += ti_sci_inta_msi.o
+obj-$(CONFIG_TI_K3_RINGACC)            += k3-ringacc.o
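Before the driver source itself, a hedged sketch of how a client driver might use the API it exports. The function, flag and struct-field names are taken from the code that follows; the device-tree property name ("ti,ringacc"), the error handling and the pushed value are illustrative only:

	struct k3_ringacc *ra;
	struct k3_ring *ring;
	struct k3_ring_cfg cfg = {
		.size     = 128,
		.elm_size = K3_RINGACC_RING_ELSIZE_8,	/* 8-byte elements */
		.mode     = K3_RINGACC_RING_MODE_RING,
		.flags    = 0,
	};
	u64 desc = 0x1234;	/* illustrative payload */
	int ret;

	ra = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	if (IS_ERR(ra))
		return PTR_ERR(ra);	/* may be -EPROBE_DEFER */

	ring = k3_ringacc_request_ring(ra, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENODEV;

	ret = k3_ringacc_ring_cfg(ring, &cfg);
	if (ret)
		goto err_free;

	ret = k3_ringacc_ring_push(ring, &desc);	/* producer side */
	if (!ret)
		ret = k3_ringacc_ring_pop(ring, &desc);	/* consumer side */

err_free:
	k3_ringacc_ring_free(ring);
	return ret;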
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
new file mode 100644 (file)
index 0000000..5fb2ee2
--- /dev/null
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 NAVSS Ring Accelerator subsystem driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+static LIST_HEAD(k3_ringacc_list);
+static DEFINE_MUTEX(k3_ringacc_list_lock);
+
+#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK            GENMASK(19, 0)
+
+/**
+ * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
+ *
+ * @resv_16: Reserved
+ * @db: Ring Doorbell Register
+ * @resv_4: Reserved
+ * @occ: Ring Occupancy Register
+ * @indx: Ring Current Index Register
+ * @hwocc: Ring Hardware Occupancy Register
+ * @hwindx: Ring Hardware Current Index Register
+ */
+struct k3_ring_rt_regs {
+       u32     resv_16[4];
+       u32     db;
+       u32     resv_4[1];
+       u32     occ;
+       u32     indx;
+       u32     hwocc;
+       u32     hwindx;
+};
+
+#define K3_RINGACC_RT_REGS_STEP        0x1000
+
+/**
+ * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
+ *
+ * @head_data: Ring Head Entry Data Registers
+ * @tail_data: Ring Tail Entry Data Registers
+ * @peek_head_data: Ring Peek Head Entry Data Regs
+ * @peek_tail_data: Ring Peek Tail Entry Data Regs
+ */
+struct k3_ring_fifo_regs {
+       u32     head_data[128];
+       u32     tail_data[128];
+       u32     peek_head_data[128];
+       u32     peek_tail_data[128];
+};
+
+/**
+ * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
+ *
+ * @revision: Revision Register
+ * @config: Config Register
+ */
+struct k3_ringacc_proxy_gcfg_regs {
+       u32     revision;
+       u32     config;
+};
+
+#define K3_RINGACC_PROXY_CFG_THREADS_MASK              GENMASK(15, 0)
+
+/**
+ * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
+ *
+ * @control: Proxy Control Register
+ * @status: Proxy Status Register
+ * @resv_512: Reserved
+ * @data: Proxy Data Register
+ */
+struct k3_ringacc_proxy_target_regs {
+       u32     control;
+       u32     status;
+       u8      resv_512[504];
+       u32     data[128];
+};
+
+#define K3_RINGACC_PROXY_TARGET_STEP   0x1000
+#define K3_RINGACC_PROXY_NOT_USED      (-1)
+
+enum k3_ringacc_proxy_access_mode {
+       PROXY_ACCESS_MODE_HEAD = 0,
+       PROXY_ACCESS_MODE_TAIL = 1,
+       PROXY_ACCESS_MODE_PEEK_HEAD = 2,
+       PROXY_ACCESS_MODE_PEEK_TAIL = 3,
+};
+
+#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES  (512U)
+#define K3_RINGACC_FIFO_REGS_STEP      0x1000
+#define K3_RINGACC_MAX_DB_RING_CNT    (127U)
+
+struct k3_ring_ops {
+       int (*push_tail)(struct k3_ring *ring, void *elm);
+       int (*push_head)(struct k3_ring *ring, void *elm);
+       int (*pop_tail)(struct k3_ring *ring, void *elm);
+       int (*pop_head)(struct k3_ring *ring, void *elm);
+};
+
+/**
+ * struct k3_ring - RA Ring descriptor
+ *
+ * @rt: Ring control/status registers
+ * @fifos: Ring queues registers
+ * @proxy: Ring Proxy Datapath registers
+ * @ring_mem_dma: Ring buffer dma address
+ * @ring_mem_virt: Ring buffer virt address
+ * @ops: Ring operations
+ * @size: Ring size in elements
+ * @elm_size: Size of the ring element
+ * @mode: Ring mode
+ * @flags: flags
+ * @free: Number of free elements
+ * @occ: Ring occupancy
+ * @windex: Write index (only for %K3_RINGACC_RING_MODE_RING)
+ * @rindex: Read index (only for %K3_RINGACC_RING_MODE_RING)
+ * @ring_id: Ring Id
+ * @parent: Pointer to the parent &struct k3_ringacc
+ * @use_count: Use count for shared rings
+ * @proxy_id: RA Ring Proxy Id (only if %K3_RINGACC_RING_USE_PROXY)
+ */
+struct k3_ring {
+       struct k3_ring_rt_regs __iomem *rt;
+       struct k3_ring_fifo_regs __iomem *fifos;
+       struct k3_ringacc_proxy_target_regs  __iomem *proxy;
+       dma_addr_t      ring_mem_dma;
+       void            *ring_mem_virt;
+       struct k3_ring_ops *ops;
+       u32             size;
+       enum k3_ring_size elm_size;
+       enum k3_ring_mode mode;
+       u32             flags;
+#define K3_RING_FLAG_BUSY      BIT(1)
+#define K3_RING_FLAG_SHARED    BIT(2)
+       u32             free;
+       u32             occ;
+       u32             windex;
+       u32             rindex;
+       u32             ring_id;
+       struct k3_ringacc       *parent;
+       u32             use_count;
+       int             proxy_id;
+};
+
+/**
+ * struct k3_ringacc - Rings accelerator descriptor
+ *
+ * @dev: pointer to the RA device
+ * @proxy_gcfg: RA proxy global config registers
+ * @proxy_target_base: RA proxy datapath region
+ * @num_rings: number of rings in RA
+ * @rings_inuse: bitfield for ring usage tracking
+ * @rm_gp_range: general purpose rings range from tisci
+ * @dma_ring_reset_quirk: DMA ring reset workaround enable
+ * @num_proxies: number of RA proxies
+ * @proxy_inuse: bitfield for proxy usage tracking
+ * @rings: array of ring descriptors (&struct k3_ring)
+ * @list: list of RAs in the system
+ * @req_lock: protects ring allocation
+ * @tisci: pointer to the ti-sci handle
+ * @tisci_ring_ops: ti-sci ring ops
+ * @tisci_dev_id: ti-sci device id
+ */
+struct k3_ringacc {
+       struct device *dev;
+       struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
+       void __iomem *proxy_target_base;
+       u32 num_rings; /* number of rings in Ringacc module */
+       unsigned long *rings_inuse;
+       struct ti_sci_resource *rm_gp_range;
+
+       bool dma_ring_reset_quirk;
+       u32 num_proxies;
+       unsigned long *proxy_inuse;
+
+       struct k3_ring *rings;
+       struct list_head list;
+       struct mutex req_lock; /* protect rings allocation */
+
+       const struct ti_sci_handle *tisci;
+       const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
+       u32 tisci_dev_id;
+};
+
+static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+{
+       return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+              (4 << ring->elm_size);
+}
+
+static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
+{
+       return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
+}
+
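+/*
+ * Illustrative arithmetic for the two helpers above (editorial, not part
+ * of the original patch): with 8-byte elements (elm_size = 1, so
+ * 4 << 1 = 8), the FIFO access offset is 512 - 8 = 504, i.e. the last
+ * element-sized slot of the window, and element 3 of a ring sits at
+ * ring_mem_virt + 24.
+ */
+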
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_ring_ops = {
+               .push_tail = k3_ringacc_ring_push_mem,
+               .pop_head = k3_ringacc_ring_pop_mem,
+};
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_msg_ops = {
+               .push_tail = k3_ringacc_ring_push_io,
+               .push_head = k3_ringacc_ring_push_head_io,
+               .pop_tail = k3_ringacc_ring_pop_tail_io,
+               .pop_head = k3_ringacc_ring_pop_io,
+};
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+               .push_tail = k3_ringacc_ring_push_tail_proxy,
+               .push_head = k3_ringacc_ring_push_head_proxy,
+               .pop_tail = k3_ringacc_ring_pop_tail_proxy,
+               .pop_head = k3_ringacc_ring_pop_head_proxy,
+};
+
+static void k3_ringacc_ring_dump(struct k3_ring *ring)
+{
+       struct device *dev = ring->parent->dev;
+
+       dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
+       dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
+               &ring->ring_mem_dma);
+       dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
+               ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+
+       dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
+       dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
+       dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
+       dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
+       dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));
+
+       if (ring->ring_mem_virt)
+               print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
+                                    16, 1, ring->ring_mem_virt, 16 * 8, false);
+}
+
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+                                       int id, u32 flags)
+{
+       int proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+       mutex_lock(&ringacc->req_lock);
+
+       if (id == K3_RINGACC_RING_ID_ANY) {
+               /* Request for any general purpose ring */
+               struct ti_sci_resource_desc *gp_rings =
+                                               &ringacc->rm_gp_range->desc[0];
+               unsigned long size;
+
+               size = gp_rings->start + gp_rings->num;
+               id = find_next_zero_bit(ringacc->rings_inuse, size,
+                                       gp_rings->start);
+               if (id == size)
+                       goto error;
+       } else if (id < 0) {
+               goto error;
+       }
+
+       if (test_bit(id, ringacc->rings_inuse) &&
+           !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
+               goto error;
+       else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
+               goto out;
+
+       if (flags & K3_RINGACC_RING_USE_PROXY) {
+               proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
+                                             ringacc->num_proxies, 0);
+               if (proxy_id == ringacc->num_proxies)
+                       goto error;
+       }
+
+       if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+               set_bit(proxy_id, ringacc->proxy_inuse);
+               ringacc->rings[id].proxy_id = proxy_id;
+               dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
+                       proxy_id);
+       } else {
+               dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
+       }
+
+       set_bit(id, ringacc->rings_inuse);
+out:
+       ringacc->rings[id].use_count++;
+       mutex_unlock(&ringacc->req_lock);
+       return &ringacc->rings[id];
+
+error:
+       mutex_unlock(&ringacc->req_lock);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+
+static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
+{
+       struct k3_ringacc *ringacc = ring->parent;
+       int ret;
+
+       ret = ringacc->tisci_ring_ops->config(
+                       ringacc->tisci,
+                       TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
+                       ringacc->tisci_dev_id,
+                       ring->ring_id,
+                       0,
+                       0,
+                       ring->size,
+                       0,
+                       0,
+                       0);
+       if (ret)
+               dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
+                       ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset(struct k3_ring *ring)
+{
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return;
+
+       ring->occ = 0;
+       ring->free = 0;
+       ring->rindex = 0;
+       ring->windex = 0;
+
+       k3_ringacc_ring_reset_sci(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
+
+static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
+                                              enum k3_ring_mode mode)
+{
+       struct k3_ringacc *ringacc = ring->parent;
+       int ret;
+
+       ret = ringacc->tisci_ring_ops->config(
+                       ringacc->tisci,
+                       TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
+                       ringacc->tisci_dev_id,
+                       ring->ring_id,
+                       0,
+                       0,
+                       0,
+                       mode,
+                       0,
+                       0);
+       if (ret)
+               dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
+                       ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
+{
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return;
+
+       if (!ring->parent->dma_ring_reset_quirk)
+               goto reset;
+
+       if (!occ)
+               occ = readl(&ring->rt->occ);
+
+       if (occ) {
+               u32 db_ring_cnt, db_ring_cnt_cur;
+
+               dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
+                       ring->ring_id, occ);
+               /* TI-SCI ring reset */
+               k3_ringacc_ring_reset_sci(ring);
+
+               /*
+                * Setup the ring in ring/doorbell mode (if not already in this
+                * mode)
+                */
+               if (ring->mode != K3_RINGACC_RING_MODE_RING)
+                       k3_ringacc_ring_reconfig_qmode_sci(
+                                       ring, K3_RINGACC_RING_MODE_RING);
+               /*
+                * Ring the doorbell 2**22 - ringOcc times.
+                * This will wrap the internal UDMAP ring state occupancy
+                * counter (which is 21-bits wide) to 0.
+                */
+               db_ring_cnt = (1U << 22) - occ;
+
+               while (db_ring_cnt != 0) {
+                       /*
+                        * Ring the doorbell with the maximum count each
+                        * iteration if possible to minimize the total
+                        * number of writes.
+                        */
+                       if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
+                               db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
+                       else
+                               db_ring_cnt_cur = db_ring_cnt;
+
+                       writel(db_ring_cnt_cur, &ring->rt->db);
+                       db_ring_cnt -= db_ring_cnt_cur;
+               }
+
+               /* Restore the original ring mode (if not ring mode) */
+               if (ring->mode != K3_RINGACC_RING_MODE_RING)
+                       k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
+       }
+
+reset:
+       /* Reset the ring */
+       k3_ringacc_ring_reset(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
+
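+/*
+ * Worked example for the reset quirk above (editorial illustration, not
+ * part of the original patch): with occ = 100, db_ring_cnt becomes
+ * 2**22 - 100 = 4194204. The loop then writes K3_RINGACC_MAX_DB_RING_CNT
+ * (127) to the doorbell 33025 times, followed by the remainder of 29,
+ * wrapping the 21-bit UDMAP occupancy counter back to zero.
+ */
+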
+static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
+{
+       struct k3_ringacc *ringacc = ring->parent;
+       int ret;
+
+       ret = ringacc->tisci_ring_ops->config(
+                       ringacc->tisci,
+                       TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+                       ringacc->tisci_dev_id,
+                       ring->ring_id,
+                       0,
+                       0,
+                       0,
+                       0,
+                       0,
+                       0);
+       if (ret)
+               dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
+                       ret, ring->ring_id);
+}
+
+int k3_ringacc_ring_free(struct k3_ring *ring)
+{
+       struct k3_ringacc *ringacc;
+
+       if (!ring)
+               return -EINVAL;
+
+       ringacc = ring->parent;
+
+       dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
+
+       if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+               return -EINVAL;
+
+       mutex_lock(&ringacc->req_lock);
+
+       if (--ring->use_count)
+               goto out;
+
+       if (!(ring->flags & K3_RING_FLAG_BUSY))
+               goto no_init;
+
+       k3_ringacc_ring_free_sci(ring);
+
+       dma_free_coherent(ringacc->dev,
+                         ring->size * (4 << ring->elm_size),
+                         ring->ring_mem_virt, ring->ring_mem_dma);
+       ring->flags = 0;
+       ring->ops = NULL;
+       if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+               clear_bit(ring->proxy_id, ringacc->proxy_inuse);
+               ring->proxy = NULL;
+               ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+       }
+
+no_init:
+       clear_bit(ring->ring_id, ringacc->rings_inuse);
+
+out:
+       mutex_unlock(&ringacc->req_lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
+
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
+{
+       if (!ring)
+               return -EINVAL;
+
+       return ring->ring_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
+{
+       if (!ring)
+               return -EINVAL;
+
+       return ring->parent->tisci_dev_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
+
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
+{
+       int irq_num;
+
+       if (!ring)
+               return -EINVAL;
+
+       irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
+       if (irq_num <= 0)
+               irq_num = -EINVAL;
+       return irq_num;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
+
+static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
+{
+       struct k3_ringacc *ringacc = ring->parent;
+       u32 ring_idx;
+       int ret;
+
+       if (!ringacc->tisci)
+               return -EINVAL;
+
+       ring_idx = ring->ring_id;
+       ret = ringacc->tisci_ring_ops->config(
+                       ringacc->tisci,
+                       TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+                       ringacc->tisci_dev_id,
+                       ring_idx,
+                       lower_32_bits(ring->ring_mem_dma),
+                       upper_32_bits(ring->ring_mem_dma),
+                       ring->size,
+                       ring->mode,
+                       ring->elm_size,
+                       0);
+       if (ret)
+               dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
+                       ret, ring_idx);
+
+       return ret;
+}
+
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+       struct k3_ringacc *ringacc;
+       int ret = 0;
+
+       if (!ring || !cfg)
+               return -EINVAL;
+
+       /* Dereference ring only after the NULL check above */
+       ringacc = ring->parent;
+       if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
+           cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
+           cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
+           !test_bit(ring->ring_id, ringacc->rings_inuse))
+               return -EINVAL;
+
+       if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
+           ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
+           cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
+               dev_err(ringacc->dev,
+                       "Message mode must use proxy for %u element size\n",
+                       4 << cfg->elm_size);
+               return -EINVAL;
+       }
+
+       /*
+        * In case of a shared ring, only the first user (the master user)
+        * can configure the ring. The sequence to be followed by the client is:
+        * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
+        * k3_ringacc_ring_cfg(ring, cfg); # master configuration
+        * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+        * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+        */
+       if (ring->use_count != 1)
+               return 0;
+
+       ring->size = cfg->size;
+       ring->elm_size = cfg->elm_size;
+       ring->mode = cfg->mode;
+       ring->occ = 0;
+       ring->free = 0;
+       ring->rindex = 0;
+       ring->windex = 0;
+
+       if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
+               ring->proxy = ringacc->proxy_target_base +
+                             ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
+
+       switch (ring->mode) {
+       case K3_RINGACC_RING_MODE_RING:
+               ring->ops = &k3_ring_mode_ring_ops;
+               break;
+       case K3_RINGACC_RING_MODE_MESSAGE:
+               if (ring->proxy)
+                       ring->ops = &k3_ring_mode_proxy_ops;
+               else
+                       ring->ops = &k3_ring_mode_msg_ops;
+               break;
+       default:
+               ring->ops = NULL;
+               ret = -EINVAL;
+               goto err_free_proxy;
+       }
+
+       ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
+                                       ring->size * (4 << ring->elm_size),
+                                       &ring->ring_mem_dma, GFP_KERNEL);
+       if (!ring->ring_mem_virt) {
+               dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+               ret = -ENOMEM;
+               goto err_free_ops;
+       }
+
+       ret = k3_ringacc_ring_cfg_sci(ring);
+
+       if (ret)
+               goto err_free_mem;
+
+       ring->flags |= K3_RING_FLAG_BUSY;
+       ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
+                       K3_RING_FLAG_SHARED : 0;
+
+       k3_ringacc_ring_dump(ring);
+
+       return 0;
+
+err_free_mem:
+       dma_free_coherent(ringacc->dev,
+                         ring->size * (4 << ring->elm_size),
+                         ring->ring_mem_virt,
+                         ring->ring_mem_dma);
+err_free_ops:
+       ring->ops = NULL;
+err_free_proxy:
+       ring->proxy = NULL;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
+
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
+{
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       return ring->size;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
+
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
+{
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       if (!ring->free)
+               ring->free = ring->size - readl(&ring->rt->occ);
+
+       return ring->free;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
+
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
+{
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       return readl(&ring->rt->occ);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
+
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
+{
+       return !k3_ringacc_ring_get_free(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
+
+enum k3_ringacc_access_mode {
+       K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
+       K3_RINGACC_ACCESS_MODE_POP_HEAD,
+       K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
+       K3_RINGACC_ACCESS_MODE_POP_TAIL,
+       K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
+       K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
+};
+
+#define K3_RINGACC_PROXY_MODE(x)       (((x) & 0x3) << 16)
+#define K3_RINGACC_PROXY_ELSIZE(x)     (((x) & 0x7) << 24)
+static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
+                                    enum k3_ringacc_proxy_access_mode mode)
+{
+       u32 val;
+
+       val = ring->ring_id;
+       val |= K3_RINGACC_PROXY_MODE(mode);
+       val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
+       writel(val, &ring->proxy->control);
+       return 0;
+}
+
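+/*
+ * Illustrative (editorial, not part of the original patch): for ring_id 5,
+ * tail access (PROXY_ACCESS_MODE_TAIL = 1) and 8-byte elements
+ * (elm_size = 1), the proxy control word written above is
+ * 0x5 | (1 << 16) | (1 << 24) = 0x01010005.
+ */
+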
+static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
+                                       enum k3_ringacc_access_mode access_mode)
+{
+       void __iomem *ptr;
+
+       ptr = (void __iomem *)&ring->proxy->data;
+
+       switch (access_mode) {
+       case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+       case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+               k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
+               break;
+       case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+       case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+               k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+       switch (access_mode) {
+       case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+       case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+               dev_dbg(ring->parent->dev,
+                       "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+                       access_mode);
+               memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+               ring->occ--;
+               break;
+       case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+       case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+               dev_dbg(ring->parent->dev,
+                       "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+                       access_mode);
+               memcpy_toio(ptr, elem, (4 << ring->elm_size));
+               ring->free--;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free,
+               ring->occ);
+       return 0;
+}
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_proxy(ring, elem,
+                                           K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_proxy(ring, elem,
+                                           K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_proxy(ring, elem,
+                                           K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_proxy(ring, elem,
+                                           K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
+                                    enum k3_ringacc_access_mode access_mode)
+{
+       void __iomem *ptr;
+
+       switch (access_mode) {
+       case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+       case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+               ptr = (void __iomem *)&ring->fifos->head_data;
+               break;
+       case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+       case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+               ptr = (void __iomem *)&ring->fifos->tail_data;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+       switch (access_mode) {
+       case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+       case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+               dev_dbg(ring->parent->dev,
+                       "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+                       access_mode);
+               memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+               ring->occ--;
+               break;
+       case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+       case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+               dev_dbg(ring->parent->dev,
+                       "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+                       access_mode);
+               memcpy_toio(ptr, elem, (4 << ring->elm_size));
+               ring->free--;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free,
+               ring->windex, ring->occ, ring->rindex);
+       return 0;
+}
+
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_io(ring, elem,
+                                        K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_io(ring, elem,
+                                        K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_io(ring, elem,
+                                        K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_io(ring, elem,
+                                        K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
+{
+       void *elem_ptr;
+
+       elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex);
+
+       memcpy(elem_ptr, elem, (4 << ring->elm_size));
+
+       ring->windex = (ring->windex + 1) % ring->size;
+       ring->free--;
+       writel(1, &ring->rt->db);
+
+       dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
+               ring->free, ring->windex);
+
+       return 0;
+}
+
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
+{
+       void *elem_ptr;
+
+       elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex);
+
+       memcpy(elem, elem_ptr, (4 << ring->elm_size));
+
+       ring->rindex = (ring->rindex + 1) % ring->size;
+       ring->occ--;
+       writel(-1, &ring->rt->db);
+
+       dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
+               ring->occ, ring->rindex, elem_ptr);
+       return 0;
+}
+
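+/*
+ * Editorial note (not part of the original patch): in ring mode the
+ * doorbell appears to take a signed element count (pushes write +1,
+ * pops write -1), which is also why the reset quirk above can wrap the
+ * occupancy counter with repeated positive writes.
+ */
+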
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free,
+               ring->windex);
+
+       if (k3_ringacc_ring_is_full(ring))
+               return -ENOMEM;
+
+       if (ring->ops && ring->ops->push_tail)
+               ret = ring->ops->push_tail(ring, elem);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
+
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
+               ring->free, ring->windex);
+
+       if (k3_ringacc_ring_is_full(ring))
+               return -ENOMEM;
+
+       if (ring->ops && ring->ops->push_head)
+               ret = ring->ops->push_head(ring, elem);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
+
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       if (!ring->occ)
+               ring->occ = k3_ringacc_ring_get_occ(ring);
+
+       dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ,
+               ring->rindex);
+
+       if (!ring->occ)
+               return -ENODATA;
+
+       if (ring->ops && ring->ops->pop_head)
+               ret = ring->ops->pop_head(ring, elem);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
+
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       if (!ring->occ)
+               ring->occ = k3_ringacc_ring_get_occ(ring);
+
+       dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ,
+               ring->rindex);
+
+       if (!ring->occ)
+               return -ENODATA;
+
+       if (ring->ops && ring->ops->pop_tail)
+               ret = ring->ops->pop_tail(ring, elem);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
+
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+                                               const char *property)
+{
+       struct device_node *ringacc_np;
+       struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
+       struct k3_ringacc *entry;
+
+       ringacc_np = of_parse_phandle(np, property, 0);
+       if (!ringacc_np)
+               return ERR_PTR(-ENODEV);
+
+       mutex_lock(&k3_ringacc_list_lock);
+       list_for_each_entry(entry, &k3_ringacc_list, list)
+               if (entry->dev->of_node == ringacc_np) {
+                       ringacc = entry;
+                       break;
+               }
+       mutex_unlock(&k3_ringacc_list_lock);
+       of_node_put(ringacc_np);
+
+       return ringacc;
+}
+EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
+
+static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+{
+       struct device_node *node = ringacc->dev->of_node;
+       struct device *dev = ringacc->dev;
+       struct platform_device *pdev = to_platform_device(dev);
+       int ret;
+
+       if (!node) {
+               dev_err(dev, "device tree info unavailable\n");
+               return -ENODEV;
+       }
+
+       ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
+       if (ret) {
+               dev_err(dev, "ti,num-rings read failure %d\n", ret);
+               return ret;
+       }
+
+       ringacc->dma_ring_reset_quirk =
+                       of_property_read_bool(node, "ti,dma-ring-reset-quirk");
+
+       ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+       if (IS_ERR(ringacc->tisci)) {
+               ret = PTR_ERR(ringacc->tisci);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "ti,sci read fail %d\n", ret);
+               ringacc->tisci = NULL;
+               return ret;
+       }
+
+       ret = of_property_read_u32(node, "ti,sci-dev-id",
+                                  &ringacc->tisci_dev_id);
+       if (ret) {
+               dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
+               return ret;
+       }
+
+       pdev->id = ringacc->tisci_dev_id;
+
+       ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
+                                               ringacc->tisci_dev_id,
+                                               "ti,sci-rm-range-gp-rings");
+       if (IS_ERR(ringacc->rm_gp_range)) {
+               dev_err(dev, "Failed to allocate MSI interrupts\n");
+               return PTR_ERR(ringacc->rm_gp_range);
+       }
+
+       return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
+                                                ringacc->rm_gp_range);
+}
+
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+       struct k3_ringacc *ringacc;
+       void __iomem *base_fifo, *base_rt;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int ret, i;
+
+       ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+       if (!ringacc)
+               return -ENOMEM;
+
+       ringacc->dev = dev;
+       mutex_init(&ringacc->req_lock);
+
+       dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+                                           DOMAIN_BUS_TI_SCI_INTA_MSI);
+       if (!dev->msi_domain) {
+               dev_err(dev, "Failed to get MSI domain\n");
+               return -EPROBE_DEFER;
+       }
+
+       ret = k3_ringacc_probe_dt(ringacc);
+       if (ret)
+               return ret;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
+       base_rt = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base_rt))
+               return PTR_ERR(base_rt);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
+       base_fifo = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base_fifo))
+               return PTR_ERR(base_fifo);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
+       ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
+       if (IS_ERR(ringacc->proxy_gcfg))
+               return PTR_ERR(ringacc->proxy_gcfg);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                          "proxy_target");
+       ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(ringacc->proxy_target_base))
+               return PTR_ERR(ringacc->proxy_target_base);
+
+       ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
+                                    K3_RINGACC_PROXY_CFG_THREADS_MASK;
+
+       ringacc->rings = devm_kcalloc(dev, ringacc->num_rings,
+                                     sizeof(*ringacc->rings), GFP_KERNEL);
+       ringacc->rings_inuse = devm_kcalloc(dev,
+                                           BITS_TO_LONGS(ringacc->num_rings),
+                                           sizeof(unsigned long), GFP_KERNEL);
+       ringacc->proxy_inuse = devm_kcalloc(dev,
+                                           BITS_TO_LONGS(ringacc->num_proxies),
+                                           sizeof(unsigned long), GFP_KERNEL);
+
+       if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
+               return -ENOMEM;
+
+       for (i = 0; i < ringacc->num_rings; i++) {
+               ringacc->rings[i].rt = base_rt +
+                                      K3_RINGACC_RT_REGS_STEP * i;
+               ringacc->rings[i].fifos = base_fifo +
+                                         K3_RINGACC_FIFO_REGS_STEP * i;
+               ringacc->rings[i].parent = ringacc;
+               ringacc->rings[i].ring_id = i;
+               ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
+       }
+       dev_set_drvdata(dev, ringacc);
+
+       ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+       mutex_lock(&k3_ringacc_list_lock);
+       list_add_tail(&ringacc->list, &k3_ringacc_list);
+       mutex_unlock(&k3_ringacc_list_lock);
+
+       dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
+                ringacc->num_rings,
+                ringacc->rm_gp_range->desc[0].start,
+                ringacc->rm_gp_range->desc[0].num,
+                ringacc->tisci_dev_id);
+       dev_info(dev, "dma-ring-reset-quirk: %s\n",
+                ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
+       dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
+                readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+       return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id k3_ringacc_of_match[] = {
+       { .compatible = "ti,am654-navss-ringacc", },
+       {},
+};
+
+static struct platform_driver k3_ringacc_driver = {
+       .probe          = k3_ringacc_probe,
+       .driver         = {
+               .name   = "k3-ringacc",
+               .of_match_table = k3_ringacc_of_match,
+               .suppress_bind_attrs = true,
+       },
+};
+builtin_platform_driver(k3_ringacc_driver);
index 870f779..d6ed0c3 100644 (file)
@@ -281,6 +281,15 @@ config SPI_FSL_QUADSPI
          This controller does not support generic SPI messages. It only
          supports the high-level SPI memory interface.
 
+config SPI_HISI_SFC_V3XX
+       tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets"
+       depends on (ARM64 && ACPI) || COMPILE_TEST
+       depends on HAS_IOMEM
+       select MTD_SPI_NOR
+       help
+         This enables support for the HiSilicon v3xx SPI-NOR flash controller
+         found in hi16xx chipsets.
+
 config SPI_NXP_FLEXSPI
        tristate "NXP Flex SPI controller"
        depends on ARCH_LAYERSCAPE || HAS_IOMEM
index bb49c9e..9b65ec5 100644 (file)
@@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI)           += spi-fsl-lpspi.o
 obj-$(CONFIG_SPI_FSL_QUADSPI)          += spi-fsl-qspi.o
 obj-$(CONFIG_SPI_FSL_SPI)              += spi-fsl-spi.o
 obj-$(CONFIG_SPI_GPIO)                 += spi-gpio.o
+obj-$(CONFIG_SPI_HISI_SFC_V3XX)                += spi-hisi-sfc-v3xx.o
 obj-$(CONFIG_SPI_IMG_SPFI)             += spi-img-spfi.o
 obj-$(CONFIG_SPI_IMX)                  += spi-imx.o
 obj-$(CONFIG_SPI_LANTIQ_SSC)           += spi-lantiq-ssc.o
index 56f0ca3..013458c 100644 (file)
@@ -514,26 +514,19 @@ static int atmel_spi_configure_dma(struct spi_master *master,
        master->dma_tx = dma_request_chan(dev, "tx");
        if (IS_ERR(master->dma_tx)) {
                err = PTR_ERR(master->dma_tx);
-               if (err == -EPROBE_DEFER) {
-                       dev_warn(dev, "no DMA channel available at the moment\n");
-                       goto error_clear;
-               }
-               dev_err(dev,
-                       "DMA TX channel not available, SPI unable to use DMA\n");
-               err = -EBUSY;
+               if (err != -EPROBE_DEFER)
+                       dev_err(dev, "No TX DMA channel, DMA is disabled\n");
                goto error_clear;
        }
 
-       /*
-        * No reason to check EPROBE_DEFER here since we have already requested
-        * tx channel. If it fails here, it's for another reason.
-        */
-       master->dma_rx = dma_request_slave_channel(dev, "rx");
-
-       if (!master->dma_rx) {
-               dev_err(dev,
-                       "DMA RX channel not available, SPI unable to use DMA\n");
-               err = -EBUSY;
+       master->dma_rx = dma_request_chan(dev, "rx");
+       if (IS_ERR(master->dma_rx)) {
+               err = PTR_ERR(master->dma_rx);
+               /*
+                * No reason to check EPROBE_DEFER here since we have already
+                * requested tx channel.
+                */
+               dev_err(dev, "No RX DMA channel, DMA is disabled\n");
                goto error;
        }
 
@@ -548,7 +541,7 @@ static int atmel_spi_configure_dma(struct spi_master *master,
 
        return 0;
 error:
-       if (master->dma_rx)
+       if (!IS_ERR(master->dma_rx))
                dma_release_channel(master->dma_rx);
        if (!IS_ERR(master->dma_tx))
                dma_release_channel(master->dma_tx);
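This is the first of several conversions from dma_request_slave_channel() to dma_request_chan() in this pull (the bcm2835 and fsl-dspi hunks below follow suit). The idiom being adopted, as a minimal sketch (illustrative, not lifted from any one driver):

	struct dma_chan *chan;
	int err;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan)) {
		err = PTR_ERR(chan);
		/* Defer silently; anything else is worth a log line. */
		if (err != -EPROBE_DEFER)
			dev_err(dev, "no RX DMA channel: %d\n", err);
		return err;
	}

Unlike the old helper, which returned NULL on every failure, the ERR_PTR return lets -EPROBE_DEFER propagate to the driver core instead of being flattened into -EBUSY or -ENODEV.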
index 85bad70..23d295f 100644 (file)
@@ -1293,7 +1293,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
                name = qspi_irq_tab[val].irq_name;
                if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
                        /* get the l2 interrupts */
-                       irq = platform_get_irq_byname(pdev, name);
+                       irq = platform_get_irq_byname_optional(pdev, name);
                } else if (!num_ints && soc_intc) {
                        /* all mspi, bspi intrs muxed to one L1 intr */
                        irq = platform_get_irq(pdev, 0);
index fb61a62..11c2358 100644 (file)
@@ -68,7 +68,7 @@
 #define BCM2835_SPI_FIFO_SIZE          64
 #define BCM2835_SPI_FIFO_SIZE_3_4      48
 #define BCM2835_SPI_DMA_MIN_LENGTH     96
-#define BCM2835_SPI_NUM_CS             3   /* raise as necessary */
+#define BCM2835_SPI_NUM_CS             4   /* raise as necessary */
 #define BCM2835_SPI_MODE_BITS  (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
                                | SPI_NO_CS | SPI_3WIRE)
 
@@ -888,8 +888,8 @@ static void bcm2835_dma_release(struct spi_controller *ctlr,
        }
 }
 
-static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
-                            struct bcm2835_spi *bs)
+static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
+                           struct bcm2835_spi *bs)
 {
        struct dma_slave_config slave_config;
        const __be32 *addr;
@@ -900,19 +900,24 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
        addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
        if (!addr) {
                dev_err(dev, "could not get DMA-register address - not using dma mode\n");
-               goto err;
+               /* Fall back to interrupt mode */
+               return 0;
        }
        dma_reg_base = be32_to_cpup(addr);
 
        /* get tx/rx dma */
-       ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
-       if (!ctlr->dma_tx) {
+       ctlr->dma_tx = dma_request_chan(dev, "tx");
+       if (IS_ERR(ctlr->dma_tx)) {
                dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
+               ret = PTR_ERR(ctlr->dma_tx);
+               ctlr->dma_tx = NULL;
                goto err;
        }
-       ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
-       if (!ctlr->dma_rx) {
+       ctlr->dma_rx = dma_request_chan(dev, "rx");
+       if (IS_ERR(ctlr->dma_rx)) {
                dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
+               ret = PTR_ERR(ctlr->dma_rx);
+               ctlr->dma_rx = NULL;
                goto err_release;
        }
 
@@ -997,7 +1002,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
        /* all went well, so set can_dma */
        ctlr->can_dma = bcm2835_spi_can_dma;
 
-       return;
+       return 0;
 
 err_config:
        dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
@@ -1005,7 +1010,14 @@ err_config:
 err_release:
        bcm2835_dma_release(ctlr, bs);
 err:
-       return;
+       /*
+        * Only report error for deferred probing, otherwise fall back to
+        * interrupt mode
+        */
+       if (ret != -EPROBE_DEFER)
+               ret = 0;
+
+       return ret;
 }
 
 static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
@@ -1305,7 +1317,10 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
        bs->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(bs->clk)) {
                err = PTR_ERR(bs->clk);
-               dev_err(&pdev->dev, "could not get clk: %d\n", err);
+               if (err == -EPROBE_DEFER)
+                       dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
+               else
+                       dev_err(&pdev->dev, "could not get clk: %d\n", err);
                goto out_controller_put;
        }
 
@@ -1317,7 +1332,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
 
        clk_prepare_enable(bs->clk);
 
-       bcm2835_dma_init(ctlr, &pdev->dev, bs);
+       err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
+       if (err)
+               goto out_clk_disable;
 
        /* initialise the hardware with the default polarities */
        bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -1327,20 +1344,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
                               dev_name(&pdev->dev), ctlr);
        if (err) {
                dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
-               goto out_clk_disable;
+               goto out_dma_release;
        }
 
        err = devm_spi_register_controller(&pdev->dev, ctlr);
        if (err) {
                dev_err(&pdev->dev, "could not register SPI controller: %d\n",
                        err);
-               goto out_clk_disable;
+               goto out_dma_release;
        }
 
        bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
 
        return 0;
 
+out_dma_release:
+       bcm2835_dma_release(ctlr, bs);
 out_clk_disable:
        clk_disable_unprepare(bs->clk);
 out_controller_put:
index d84e22d..68491a8 100644 (file)
@@ -329,8 +329,20 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
 int spi_bitbang_init(struct spi_bitbang *bitbang)
 {
        struct spi_master *master = bitbang->master;
+       bool custom_cs;
 
-       if (!master || !bitbang->chipselect)
+       if (!master)
+               return -EINVAL;
+       /*
+        * We only need the chipselect callback if we are actually using it.
+        * If we just use GPIO descriptors, it is surplus. If the
+        * SPI_MASTER_GPIO_SS flag is set, we always need to call the
+        * driver-specific chipselect routine.
+        */
+       custom_cs = (!master->use_gpio_descriptors ||
+                    (master->flags & SPI_MASTER_GPIO_SS));
+
+       if (custom_cs && !bitbang->chipselect)
                return -EINVAL;
 
        mutex_init(&bitbang->lock);
@@ -344,7 +356,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang)
        master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
        master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
        master->transfer_one = spi_bitbang_transfer_one;
-       master->set_cs = spi_bitbang_set_cs;
+       /*
+        * When using GPIO descriptors, the ->set_cs() callback doesn't even
+        * get called unless SPI_MASTER_GPIO_SS is set.
+        */
+       if (custom_cs)
+               master->set_cs = spi_bitbang_set_cs;
 
        if (!bitbang->txrx_bufs) {
                bitbang->use_dma = 0;
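For users of the bitbang helper, the upshot of the two hunks above is that a driver whose chip selects are plain GPIOs can omit the chipselect callback entirely and let the core drive the descriptors; the spi-oc-tiny conversion further down in this series does exactly this. A sketch of such a driver's setup, assuming it opts into GPIO descriptors (hw and foo_txrx_bufs are hypothetical):

	master->use_gpio_descriptors = true;	/* core toggles CS via gpiod */
	hw->bitbang.master = master;
	hw->bitbang.txrx_bufs = foo_txrx_bufs;	/* no .chipselect needed now */
	err = spi_bitbang_start(&hw->bitbang);	/* passes the custom_cs check */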
index 5a25da3..31e3f86 100644 (file)
@@ -297,6 +297,9 @@ static int dw_spi_transfer_one(struct spi_controller *master,
        dws->len = transfer->len;
        spin_unlock_irqrestore(&dws->buf_lock, flags);
 
+       /* Ensure dws->rx and dws->rx_end are visible to the IRQ handler */
+       smp_mb();
+
        spi_enable_chip(dws, 0);
 
        /* Handle per transfer options for bpw and speed */
@@ -469,7 +472,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        struct spi_controller *master;
        int ret;
 
-       BUG_ON(dws == NULL);
+       if (!dws)
+               return -EINVAL;
 
        master = spi_alloc_master(dev, 0);
        if (!master)
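The smp_mb() added in dw_spi_transfer_one() orders the buffer-pointer updates (made under dws->buf_lock) before the controller is touched, so an interrupt handler running on another CPU cannot observe stale dws->rx/dws->rx_end values. A simplified sketch of the ordering being enforced — not the driver's literal code:

	/* writer side: publish the transfer state, then let the IRQ fire */
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	smp_mb();			/* pointers visible before IRQs can run */
	spi_enable_chip(dws, 1);	/* handler may now read rx/rx_end safely */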
index 8428b69..6ec2dcb 100644 (file)
@@ -396,17 +396,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
        if (!dma)
                return -ENOMEM;
 
-       dma->chan_rx = dma_request_slave_channel(dev, "rx");
-       if (!dma->chan_rx) {
+       dma->chan_rx = dma_request_chan(dev, "rx");
+       if (IS_ERR(dma->chan_rx)) {
                dev_err(dev, "rx dma channel not available\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(dma->chan_rx);
                return ret;
        }
 
-       dma->chan_tx = dma_request_slave_channel(dev, "tx");
-       if (!dma->chan_tx) {
+       dma->chan_tx = dma_request_chan(dev, "tx");
+       if (IS_ERR(dma->chan_tx)) {
                dev_err(dev, "tx dma channel not available\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(dma->chan_tx);
                goto err_tx_channel;
        }
 
index 2cc0ddb..d0b8cc7 100644 (file)
@@ -469,9 +469,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
                fsl_lpspi->watermark = fsl_lpspi->txfifosize;
 
        if (fsl_lpspi_can_dma(controller, spi, t))
-               fsl_lpspi->usedma = 1;
+               fsl_lpspi->usedma = true;
        else
-               fsl_lpspi->usedma = 0;
+               fsl_lpspi->usedma = false;
 
        return fsl_lpspi_config(fsl_lpspi);
 }
@@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
        fsl_lpspi->dev = &pdev->dev;
        fsl_lpspi->is_slave = is_slave;
 
+       controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+       controller->transfer_one = fsl_lpspi_transfer_one;
+       controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+       controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+       controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+       controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+       controller->dev.of_node = pdev->dev.of_node;
+       controller->bus_num = pdev->id;
+       controller->slave_abort = fsl_lpspi_slave_abort;
+
+       ret = devm_spi_register_controller(&pdev->dev, controller);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "spi_register_controller error.\n");
+               goto out_controller_put;
+       }
+
        if (!fsl_lpspi->is_slave) {
                for (i = 0; i < controller->num_chipselect; i++) {
                        int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
@@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
                controller->prepare_message = fsl_lpspi_prepare_message;
        }
 
-       controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
-       controller->transfer_one = fsl_lpspi_transfer_one;
-       controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
-       controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
-       controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-       controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
-       controller->dev.of_node = pdev->dev.of_node;
-       controller->bus_num = pdev->id;
-       controller->slave_abort = fsl_lpspi_slave_abort;
-
        init_completion(&fsl_lpspi->xfer_done);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
        if (ret < 0)
                dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
 
-       ret = devm_spi_register_controller(&pdev->dev, controller);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "spi_register_controller error.\n");
-               goto out_controller_put;
-       }
-
        return 0;
 
 out_controller_put:
index 79b1558..e8a499c 100644 (file)
@@ -410,7 +410,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem,
            op->data.nbytes > q->devtype_data->txfifo)
                return false;
 
-       return true;
+       return spi_mem_default_supports_op(mem, op);
 }
 
 static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
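Returning spi_mem_default_supports_op() instead of a bare true means the core's generic checks (supported bus widths, data direction, controller capabilities) still run after the controller-specific size limits, rather than being silently skipped. The general shape of such a hook — names hypothetical apart from the spi-mem API:

	static bool foo_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
	{
		/* controller-specific limit (FOO_MAX_XFER is hypothetical) */
		if (op->data.nbytes > FOO_MAX_XFER)
			return false;

		/* then let the core veto unsupported bus widths etc. */
		return spi_mem_default_supports_op(mem, op);
	}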
index fb4159a..3b81772 100644 (file)
@@ -706,8 +706,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
        struct device_node *np = ofdev->dev.of_node;
        struct spi_master *master;
        struct resource mem;
-       int irq = 0, type;
-       int ret = -ENOMEM;
+       int irq, type;
+       int ret;
 
        ret = of_mpc8xxx_spi_probe(ofdev);
        if (ret)
@@ -722,10 +722,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
 
                if (spisel_boot) {
                        pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
-                       if (!pinfo->immr_spi_cs) {
-                               ret = -ENOMEM;
-                               goto err;
-                       }
+                       if (!pinfo->immr_spi_cs)
+                               return -ENOMEM;
                }
 #endif
                /*
@@ -744,24 +742,15 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
 
        ret = of_address_to_resource(np, 0, &mem);
        if (ret)
-               goto err;
+               return ret;
 
        irq = platform_get_irq(ofdev, 0);
-       if (irq < 0) {
-               ret = irq;
-               goto err;
-       }
+       if (irq < 0)
+               return irq;
 
        master = fsl_spi_probe(dev, &mem, irq);
-       if (IS_ERR(master)) {
-               ret = PTR_ERR(master);
-               goto err;
-       }
-
-       return 0;
 
-err:
-       return ret;
+       return PTR_ERR_OR_ZERO(master);
 }
 
 static int of_fsl_spi_remove(struct platform_device *ofdev)
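PTR_ERR_OR_ZERO() (from include/linux/err.h) collapses the usual IS_ERR()/PTR_ERR() tail into one expression, which is what lets the error label above disappear. It behaves roughly like:

	/* roughly what PTR_ERR_OR_ZERO(ptr) does */
	static inline int ptr_err_or_zero(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}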
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
new file mode 100644 (file)
index 0000000..4cf8fc8
--- /dev/null
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets
+//
+// Copyright (c) 2019 HiSilicon Technologies Co., Ltd.
+// Author: John Garry <john.garry@huawei.com>
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define HISI_SFC_V3XX_VERSION (0x1f8)
+
+#define HISI_SFC_V3XX_CMD_CFG (0x300)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
+#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
+#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4
+#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3)
+#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1
+#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0)
+#define HISI_SFC_V3XX_CMD_INS (0x308)
+#define HISI_SFC_V3XX_CMD_ADDR (0x30c)
+#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400)
+
+struct hisi_sfc_v3xx_host {
+       struct device *dev;
+       void __iomem *regbase;
+       int max_cmd_dword;
+};
+
+#define HISI_SFC_V3XX_WAIT_TIMEOUT_US          1000000
+#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US    10
+
+static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host)
+{
+       u32 reg;
+
+       return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg,
+                                 !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK),
+                                 HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US,
+                                 HISI_SFC_V3XX_WAIT_TIMEOUT_US);
+}
+
+static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
+                                       struct spi_mem_op *op)
+{
+       struct spi_device *spi = mem->spi;
+       struct hisi_sfc_v3xx_host *host;
+       uintptr_t addr = (uintptr_t)op->data.buf.in;
+       int max_byte_count;
+
+       host = spi_controller_get_devdata(spi->master);
+
+       max_byte_count = host->max_cmd_dword * 4;
+
+       if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4)
+               op->data.nbytes = 4 - (addr % 4);
+       else if (op->data.nbytes > max_byte_count)
+               op->data.nbytes = max_byte_count;
+
+       return 0;
+}
+
+/*
+ * memcpy_{to,from}io doesn't guarantee the 32-bit accesses that the DATABUF
+ * registers require, so use __io{read,write}32_copy where possible. Trailing
+ * bytes are copied to/from the DATABUF register byte-by-byte, as we must not
+ * clobber memory outside the source/destination buffer.
+ *
+ * For efficiency, hisi_sfc_v3xx_adjust_op_size() splits any unaligned leading
+ * bytes into a separate transaction so the bulk transfer starts 32-bit aligned.
+ */
+static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host,
+                                      u8 *to, unsigned int len)
+{
+       void __iomem *from;
+       int i;
+
+       from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+       if (IS_ALIGNED((uintptr_t)to, 4)) {
+               int words = len / 4;
+
+               __ioread32_copy(to, from, words);
+
+               len -= words * 4;
+               if (len) {
+                       u32 val;
+
+                       to += words * 4;
+                       from += words * 4;
+
+                       val = __raw_readl(from);
+
+                       for (i = 0; i < len; i++, val >>= 8, to++)
+                               *to = (u8)val;
+               }
+       } else {
+               for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) {
+                       u32 val = __raw_readl(from);
+                       int j;
+
+                       for (j = 0; j < 4 && (j + (i * 4) < len);
+                            to++, val >>= 8, j++)
+                               *to = (u8)val;
+               }
+       }
+}
+
+static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host,
+                                       const u8 *from, unsigned int len)
+{
+       void __iomem *to;
+       int i;
+
+       to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+       if (IS_ALIGNED((uintptr_t)from, 4)) {
+               int words = len / 4;
+
+               __iowrite32_copy(to, from, words);
+
+               len -= words * 4;
+               if (len) {
+                       u32 val = 0;
+
+                       to += words * 4;
+                       from += words * 4;
+
+                       for (i = 0; i < len; i++, from++)
+                               val |= *from << i * 8;
+                       __raw_writel(val, to);
+               }
+
+       } else {
+               for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) {
+                       u32 val = 0;
+                       int j;
+
+                       for (j = 0; j < 4 && (j + (i * 4) < len);
+                            from++, j++)
+                               val |= *from << j * 8;
+                       __raw_writel(val, to);
+               }
+       }
+}
+
+static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
+                                        const struct spi_mem_op *op,
+                                        u8 chip_select)
+{
+       int ret, len = op->data.nbytes;
+       u32 config = 0;
+
+       if (op->addr.nbytes)
+               config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
+
+       if (op->data.dir != SPI_MEM_NO_DATA) {
+               config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
+               config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
+       }
+
+       if (op->data.dir == SPI_MEM_DATA_OUT)
+               hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len);
+       else if (op->data.dir == SPI_MEM_DATA_IN)
+               config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK;
+
+       config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF |
+                 chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF |
+                 HISI_SFC_V3XX_CMD_CFG_START_MSK;
+
+       writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR);
+       writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS);
+
+       writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG);
+
+       ret = hisi_sfc_v3xx_wait_cmd_idle(host);
+       if (ret)
+               return ret;
+
+       if (op->data.dir == SPI_MEM_DATA_IN)
+               hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len);
+
+       return 0;
+}
+
+static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
+                                const struct spi_mem_op *op)
+{
+       struct hisi_sfc_v3xx_host *host;
+       struct spi_device *spi = mem->spi;
+       u8 chip_select = spi->chip_select;
+
+       host = spi_controller_get_devdata(spi->master);
+
+       return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select);
+}
+
+static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
+       .adjust_op_size = hisi_sfc_v3xx_adjust_op_size,
+       .exec_op = hisi_sfc_v3xx_exec_op,
+};
+
+static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct hisi_sfc_v3xx_host *host;
+       struct spi_controller *ctlr;
+       u32 version;
+       int ret;
+
+       ctlr = spi_alloc_master(&pdev->dev, sizeof(*host));
+       if (!ctlr)
+               return -ENOMEM;
+
+       ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+                         SPI_TX_DUAL | SPI_TX_QUAD;
+
+       host = spi_controller_get_devdata(ctlr);
+       host->dev = dev;
+
+       platform_set_drvdata(pdev, host);
+
+       host->regbase = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(host->regbase)) {
+               ret = PTR_ERR(host->regbase);
+               goto err_put_master;
+       }
+
+       ctlr->bus_num = -1;
+       ctlr->num_chipselect = 1;
+       ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
+
+       version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
+
+       switch (version) {
+       case 0x351:
+               host->max_cmd_dword = 64;
+               break;
+       default:
+               host->max_cmd_dword = 16;
+               break;
+       }
+
+       ret = devm_spi_register_controller(dev, ctlr);
+       if (ret)
+               goto err_put_master;
+
+       dev_info(&pdev->dev, "hw version 0x%x\n", version);
+
+       return 0;
+
+err_put_master:
+       spi_master_put(ctlr);
+       return ret;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = {
+       {"HISI0341", 0},
+       {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids);
+#endif
+
+static struct platform_driver hisi_sfc_v3xx_spi_driver = {
+       .driver = {
+               .name   = "hisi-sfc-v3xx",
+               .acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids),
+       },
+       .probe  = hisi_sfc_v3xx_probe,
+};
+
+module_platform_driver(hisi_sfc_v3xx_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets");
index f4a8f47..8543f5e 100644 (file)
@@ -666,8 +666,22 @@ static int img_spfi_probe(struct platform_device *pdev)
        master->unprepare_message = img_spfi_unprepare;
        master->handle_err = img_spfi_handle_err;
 
-       spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
-       spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
+       spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
+       if (IS_ERR(spfi->tx_ch)) {
+               ret = PTR_ERR(spfi->tx_ch);
+               spfi->tx_ch = NULL;
+               if (ret == -EPROBE_DEFER)
+                       goto disable_pm;
+       }
+
+       spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
+       if (IS_ERR(spfi->rx_ch)) {
+               ret = PTR_ERR(spfi->rx_ch);
+               spfi->rx_ch = NULL;
+               if (ret == -EPROBE_DEFER)
+                       goto disable_pm;
+       }
+
        if (!spfi->tx_ch || !spfi->rx_ch) {
                if (spfi->tx_ch)
                        dma_release_channel(spfi->tx_ch);
index 49f0099..f4f28a4 100644 (file)
@@ -1230,9 +1230,9 @@ static int spi_imx_setupxfer(struct spi_device *spi,
        }
 
        if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
-               spi_imx->usedma = 1;
+               spi_imx->usedma = true;
        else
-               spi_imx->usedma = 0;
+               spi_imx->usedma = false;
 
        if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
                spi_imx->rx = mx53_ecspi_rx_slave;
index f3f1044..7f5680f 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/reset.h>
-#include <linux/gpio.h>
 
 /*
  * The Meson SPICC controller could support DMA based transfers, but is not
@@ -467,35 +466,14 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master)
 
 static int meson_spicc_setup(struct spi_device *spi)
 {
-       int ret = 0;
-
        if (!spi->controller_state)
                spi->controller_state = spi_master_get_devdata(spi->master);
-       else if (gpio_is_valid(spi->cs_gpio))
-               goto out_gpio;
-       else if (spi->cs_gpio == -ENOENT)
-               return 0;
-
-       if (gpio_is_valid(spi->cs_gpio)) {
-               ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
-               if (ret) {
-                       dev_err(&spi->dev, "failed to request cs gpio\n");
-                       return ret;
-               }
-       }
-
-out_gpio:
-       ret = gpio_direction_output(spi->cs_gpio,
-                       !(spi->mode & SPI_CS_HIGH));
 
-       return ret;
+       return 0;
 }
 
 static void meson_spicc_cleanup(struct spi_device *spi)
 {
-       if (gpio_is_valid(spi->cs_gpio))
-               gpio_free(spi->cs_gpio);
-
        spi->controller_state = NULL;
 }
 
@@ -564,6 +542,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
        master->prepare_message = meson_spicc_prepare_message;
        master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
        master->transfer_one = meson_spicc_transfer_one;
+       master->use_gpio_descriptors = true;
 
        /* Setup max rate according to the Meson GX datasheet */
        if ((rate >> 2) > SPICC_MAX_FREQ)
index 996c1c8..dce85ee 100644 (file)
@@ -590,10 +590,10 @@ static int mxs_spi_probe(struct platform_device *pdev)
        if (ret)
                goto out_master_free;
 
-       ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
-       if (!ssp->dmach) {
+       ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+       if (IS_ERR(ssp->dmach)) {
                dev_err(ssp->dev, "Failed to request DMA\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(ssp->dmach);
                goto out_master_free;
        }
 
index fe62473..87cd023 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
+#include <linux/reset.h>
 
 #include <asm/unaligned.h>
 
@@ -20,7 +21,7 @@
 
 struct npcm_pspi {
        struct completion xfer_done;
-       struct regmap *rst_regmap;
+       struct reset_control *reset;
        struct spi_master *master;
        unsigned int tx_bytes;
        unsigned int rx_bytes;
@@ -59,12 +60,6 @@ struct npcm_pspi {
 #define NPCM_PSPI_MIN_CLK_DIVIDER      4
 #define NPCM_PSPI_DEFAULT_CLK          25000000
 
-/* reset register */
-#define NPCM7XX_IPSRST2_OFFSET 0x24
-
-#define NPCM7XX_PSPI1_RESET    BIT(22)
-#define NPCM7XX_PSPI2_RESET    BIT(23)
-
 static inline unsigned int bytes_per_word(unsigned int bits)
 {
        return bits <= 8 ? 1 : 2;
@@ -178,6 +173,13 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
                priv->mode = spi->mode;
        }
 
+       /*
+        * If the transfer is an even number of bytes at 8 bits per word,
+        * run it as a 16 bits-per-word transfer instead.
+        */
+       if (priv->bits_per_word == 8 && !(t->len & 0x1))
+               t->bits_per_word = 16;
+
        if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
                npcm_pspi_set_transfer_size(priv, t->bits_per_word);
                priv->bits_per_word = t->bits_per_word;
@@ -195,6 +197,7 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
 static void npcm_pspi_send(struct npcm_pspi *priv)
 {
        int wsize;
+       u16 val;
 
        wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
        priv->tx_bytes -= wsize;
@@ -204,17 +207,18 @@ static void npcm_pspi_send(struct npcm_pspi *priv)
 
        switch (wsize) {
        case 1:
-               iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+               val = *priv->tx_buf++;
+               iowrite8(val, NPCM_PSPI_DATA + priv->base);
                break;
        case 2:
-               iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+               val = *priv->tx_buf++;
+               val = *priv->tx_buf++ | (val << 8);
+               iowrite16(val, NPCM_PSPI_DATA + priv->base);
                break;
        default:
                WARN_ON_ONCE(1);
                return;
        }
-
-       priv->tx_buf += wsize;
 }
 
 static void npcm_pspi_recv(struct npcm_pspi *priv)
@@ -230,18 +234,17 @@ static void npcm_pspi_recv(struct npcm_pspi *priv)
 
        switch (rsize) {
        case 1:
-               val = ioread8(priv->base + NPCM_PSPI_DATA);
+               *priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA);
                break;
        case 2:
                val = ioread16(priv->base + NPCM_PSPI_DATA);
+               *priv->rx_buf++ = (val >> 8);
+               *priv->rx_buf++ = val & 0xff;
                break;
        default:
                WARN_ON_ONCE(1);
                return;
        }
-
-       *priv->rx_buf = val;
-       priv->rx_buf += rsize;
 }
 
 static int npcm_pspi_transfer_one(struct spi_master *master,
@@ -285,9 +288,9 @@ static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master)
 
 static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
 {
-       regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET,
-                    NPCM7XX_PSPI1_RESET << priv->id);
-       regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0);
+       reset_control_assert(priv->reset);
+       udelay(5);
+       reset_control_deassert(priv->reset);
 }
 
 static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
@@ -351,10 +354,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
        if (num_cs < 0)
                return num_cs;
 
-       pdev->id = of_alias_get_id(np, "spi");
-       if (pdev->id < 0)
-               pdev->id = 0;
-
        master = spi_alloc_master(&pdev->dev, sizeof(*priv));
        if (!master)
                return -ENOMEM;
@@ -364,7 +363,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
        priv = spi_master_get_devdata(master);
        priv->master = master;
        priv->is_save_param = false;
-       priv->id = pdev->id;
 
        priv->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->base)) {
@@ -389,11 +387,10 @@ static int npcm_pspi_probe(struct platform_device *pdev)
                goto out_disable_clk;
        }
 
-       priv->rst_regmap =
-               syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
-       if (IS_ERR(priv->rst_regmap)) {
-               dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n");
-               return PTR_ERR(priv->rst_regmap);
+       priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+       if (IS_ERR(priv->reset)) {
+               ret = PTR_ERR(priv->reset);
+               goto out_disable_clk;
        }
 
        /* reset SPI-HW block */
@@ -414,7 +411,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
        master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
        master->mode_bits = SPI_CPHA | SPI_CPOL;
        master->dev.of_node = pdev->dev.of_node;
-       master->bus_num = pdev->id;
+       master->bus_num = -1;
        master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
        master->transfer_one = npcm_pspi_transfer_one;
        master->prepare_transfer_hardware =
@@ -447,7 +444,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
        if (ret)
                goto out_disable_clk;
 
-       pr_info("NPCM Peripheral SPI %d probed\n", pdev->id);
+       pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num);
 
        return 0;
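The rewritten npcm send/recv paths above pack and unpack the 16-bit FIFO word explicitly, high byte first, instead of dereferencing the u8 buffer pointer at the wrong width. A worked example of the arithmetic, with tx_buf/rx_buf being u8 pointers as in the driver:

	/* transmit: bytes {0xAB, 0xCD} are packed high byte first */
	u16 val;
	val = *tx_buf++;		/* val = 0x00AB */
	val = *tx_buf++ | (val << 8);	/* val = 0xABCD */

	/* receive: 0xABCD is split back out in the same order */
	*rx_buf++ = val >> 8;		/* 0xAB */
	*rx_buf++ = val & 0xff;		/* 0xCD */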
 
index e2331eb..9df7c59 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/spi/spi_bitbang.h>
 #include <linux/spi/spi_oc_tiny.h>
 #include <linux/io.h>
-#include <linux/gpio.h>
 #include <linux/of.h>
 
 #define DRV_NAME "spi_oc_tiny"
@@ -50,8 +49,6 @@ struct tiny_spi {
        unsigned int txc, rxc;
        const u8 *txp;
        u8 *rxp;
-       int gpio_cs_count;
-       int *gpio_cs;
 };
 
 static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
@@ -66,16 +63,6 @@ static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
        return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
 }
 
-static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
-{
-       struct tiny_spi *hw = tiny_spi_to_hw(spi);
-
-       if (hw->gpio_cs_count > 0) {
-               gpio_set_value(hw->gpio_cs[spi->chip_select],
-                       (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
-       }
-}
-
 static int tiny_spi_setup_transfer(struct spi_device *spi,
                                   struct spi_transfer *t)
 {
@@ -203,24 +190,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
 {
        struct tiny_spi *hw = platform_get_drvdata(pdev);
        struct device_node *np = pdev->dev.of_node;
-       unsigned int i;
        u32 val;
 
        if (!np)
                return 0;
-       hw->gpio_cs_count = of_gpio_count(np);
-       if (hw->gpio_cs_count > 0) {
-               hw->gpio_cs = devm_kcalloc(&pdev->dev,
-                               hw->gpio_cs_count, sizeof(unsigned int),
-                               GFP_KERNEL);
-               if (!hw->gpio_cs)
-                       return -ENOMEM;
-       }
-       for (i = 0; i < hw->gpio_cs_count; i++) {
-               hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
-               if (hw->gpio_cs[i] < 0)
-                       return -ENODEV;
-       }
        hw->bitbang.master->dev.of_node = pdev->dev.of_node;
        if (!of_property_read_u32(np, "clock-frequency", &val))
                hw->freq = val;
@@ -240,7 +213,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
        struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
        struct tiny_spi *hw;
        struct spi_master *master;
-       unsigned int i;
        int err = -ENODEV;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
@@ -249,9 +221,9 @@ static int tiny_spi_probe(struct platform_device *pdev)
 
        /* setup the master state. */
        master->bus_num = pdev->id;
-       master->num_chipselect = 255;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
        master->setup = tiny_spi_setup;
+       master->use_gpio_descriptors = true;
 
        hw = spi_master_get_devdata(master);
        platform_set_drvdata(pdev, hw);
@@ -259,7 +231,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
        /* setup the state for the bitbang driver */
        hw->bitbang.master = master;
        hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
-       hw->bitbang.chipselect = tiny_spi_chipselect;
        hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
 
        /* find and map our resources */
@@ -279,12 +250,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
        }
        /* find platform data */
        if (platp) {
-               hw->gpio_cs_count = platp->gpio_cs_count;
-               hw->gpio_cs = platp->gpio_cs;
-               if (platp->gpio_cs_count && !platp->gpio_cs) {
-                       err = -EBUSY;
-                       goto exit;
-               }
                hw->freq = platp->freq;
                hw->baudwidth = platp->baudwidth;
        } else {
@@ -292,13 +257,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
                if (err)
                        goto exit;
        }
-       for (i = 0; i < hw->gpio_cs_count; i++) {
-               err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
-               if (err)
-                       goto exit_gpio;
-               gpio_direction_output(hw->gpio_cs[i], 1);
-       }
-       hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
 
        /* register our spi controller */
        err = spi_bitbang_start(&hw->bitbang);
@@ -308,9 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
 
        return 0;
 
-exit_gpio:
-       while (i-- > 0)
-               gpio_free(hw->gpio_cs[i]);
 exit:
        spi_master_put(master);
        return err;
@@ -320,11 +275,8 @@ static int tiny_spi_remove(struct platform_device *pdev)
 {
        struct tiny_spi *hw = platform_get_drvdata(pdev);
        struct spi_master *master = hw->bitbang.master;
-       unsigned int i;
 
        spi_bitbang_stop(&hw->bitbang);
-       for (i = 0; i < hw->gpio_cs_count; i++)
-               gpio_free(hw->gpio_cs[i]);
        spi_master_put(master);
        return 0;
 }
index 9071333..4c7a71f 100644 (file)
@@ -461,6 +461,16 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
        return limit;
 }
 
+static void pxa2xx_spi_off(struct driver_data *drv_data)
+{
+       /* On MMP, disabling SSE seems to corrupt the RX FIFO */
+       if (drv_data->ssp_type == MMP2_SSP)
+               return;
+
+       pxa2xx_spi_write(drv_data, SSCR0,
+                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+}
+
 static int null_writer(struct driver_data *drv_data)
 {
        u8 n_bytes = drv_data->n_bytes;
@@ -587,8 +597,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
        if (!pxa25x_ssp_comp(drv_data))
                pxa2xx_spi_write(drv_data, SSTO, 0);
        pxa2xx_spi_flush(drv_data);
-       pxa2xx_spi_write(drv_data, SSCR0,
-                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_off(drv_data);
 
        dev_err(&drv_data->pdev->dev, "%s\n", msg);
 
@@ -686,8 +695,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 
 static void handle_bad_msg(struct driver_data *drv_data)
 {
-       pxa2xx_spi_write(drv_data, SSCR0,
-                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_off(drv_data);
        pxa2xx_spi_write(drv_data, SSCR1,
                         pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
        if (!pxa25x_ssp_comp(drv_data))
@@ -1062,7 +1070,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
            || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
            != (cr1 & change_mask)) {
                /* stop the SSP, and update the other bits */
-               pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
+               if (drv_data->ssp_type != MMP2_SSP)
+                       pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
                if (!pxa25x_ssp_comp(drv_data))
                        pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
                /* first set CR1 without interrupt and service enables */
@@ -1118,8 +1127,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
        if (!pxa25x_ssp_comp(drv_data))
                pxa2xx_spi_write(drv_data, SSTO, 0);
        pxa2xx_spi_flush(drv_data);
-       pxa2xx_spi_write(drv_data, SSCR0,
-                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_off(drv_data);
 
        dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
 
@@ -1135,8 +1143,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
        struct driver_data *drv_data = spi_controller_get_devdata(controller);
 
        /* Disable the SSP */
-       pxa2xx_spi_write(drv_data, SSCR0,
-                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_off(drv_data);
        /* Clear and disable interrupts and service requests */
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        pxa2xx_spi_write(drv_data, SSCR1,
@@ -1161,8 +1168,7 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
        struct driver_data *drv_data = spi_controller_get_devdata(controller);
 
        /* Disable the SSP now */
-       pxa2xx_spi_write(drv_data, SSCR0,
-                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_off(drv_data);
 
        return 0;
 }
@@ -1423,6 +1429,9 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
        /* KBL-H */
        { PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
        { PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
+       /* CML-V */
+       { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP },
+       { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP },
        /* BXT A-Step */
        { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
index 250fd60..3c4f83b 100644 (file)
@@ -137,7 +137,7 @@ enum qspi_clocks {
 struct qcom_qspi {
        void __iomem *base;
        struct device *dev;
-       struct clk_bulk_data clks[QSPI_NUM_CLKS];
+       struct clk_bulk_data *clks;
        struct qspi_xfer xfer;
        /* Lock to protect xfer and IRQ accessed registers */
        spinlock_t lock;
@@ -445,6 +445,13 @@ static int qcom_qspi_probe(struct platform_device *pdev)
                goto exit_probe_master_put;
        }
 
+       ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
+                                 sizeof(*ctrl->clks), GFP_KERNEL);
+       if (!ctrl->clks) {
+               ret = -ENOMEM;
+               goto exit_probe_master_put;
+       }
+
        ctrl->clks[QSPI_CLK_CORE].id = "core";
        ctrl->clks[QSPI_CLK_IFACE].id = "iface";
        ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
index 7222c76..85575d4 100644 (file)
 #define SPCMD_SPIMOD_DUAL      SPCMD_SPIMOD0
 #define SPCMD_SPIMOD_QUAD      SPCMD_SPIMOD1
 #define SPCMD_SPRW             0x0010  /* SPI Read/Write Access (Dual/Quad) */
-#define SPCMD_SSLA_MASK                0x0030  /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_SSLA(i)          ((i) << 4)      /* SSL Assert Signal Setting */
 #define SPCMD_BRDV_MASK                0x000c  /* Bit Rate Division Setting */
 #define SPCMD_CPOL             0x0002  /* Clock Polarity Setting */
 #define SPCMD_CPHA             0x0001  /* Clock Phase Setting */
@@ -242,6 +242,7 @@ struct spi_ops {
        u16 mode_bits;
        u16 flags;
        u16 fifo_size;
+       u8 num_hw_ss;
 };
 
 /*
@@ -426,8 +427,6 @@ static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
        return n;
 }
 
-#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
-
 static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
 {
        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
@@ -620,9 +619,8 @@ no_dma_tx:
                dmaengine_terminate_all(rspi->ctlr->dma_rx);
 no_dma_rx:
        if (ret == -EAGAIN) {
-               pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
-                            dev_driver_string(&rspi->ctlr->dev),
-                            dev_name(&rspi->ctlr->dev));
+               dev_warn_once(&rspi->ctlr->dev,
+                             "DMA not available, falling back to PIO\n");
        }
        return ret;
 }
@@ -936,12 +934,16 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
        if (spi->mode & SPI_CPHA)
                rspi->spcmd |= SPCMD_CPHA;
 
+       /* Configure slave signal to assert */
+       rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
+                                               : spi->chip_select);
+
        /* CMOS output mode and MOSI signal from previous transfer */
        rspi->sppcr = 0;
        if (spi->mode & SPI_LOOP)
                rspi->sppcr |= SPPCR_SPLP;
 
-       set_config_register(rspi, 8);
+       rspi->ops->set_config_register(rspi, 8);
 
        if (msg->spi->mode &
            (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
@@ -1123,6 +1125,7 @@ static const struct spi_ops rspi_ops = {
        .mode_bits =            SPI_CPHA | SPI_CPOL | SPI_LOOP,
        .flags =                SPI_CONTROLLER_MUST_TX,
        .fifo_size =            8,
+       .num_hw_ss =            2,
 };
 
 static const struct spi_ops rspi_rz_ops = {
@@ -1131,6 +1134,7 @@ static const struct spi_ops rspi_rz_ops = {
        .mode_bits =            SPI_CPHA | SPI_CPOL | SPI_LOOP,
        .flags =                SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
        .fifo_size =            8,      /* 8 for TX, 32 for RX */
+       .num_hw_ss =            1,
 };
 
 static const struct spi_ops qspi_ops = {
@@ -1141,6 +1145,7 @@ static const struct spi_ops qspi_ops = {
                                SPI_RX_DUAL | SPI_RX_QUAD,
        .flags =                SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
        .fifo_size =            32,
+       .num_hw_ss =            1,
 };
 
 #ifdef CONFIG_OF
@@ -1256,6 +1261,8 @@ static int rspi_probe(struct platform_device *pdev)
        ctlr->mode_bits = ops->mode_bits;
        ctlr->flags = ops->flags;
        ctlr->dev.of_node = pdev->dev.of_node;
+       ctlr->use_gpio_descriptors = true;
+       ctlr->max_native_cs = rspi->ops->num_hw_ss;
 
        ret = platform_get_irq_byname_optional(pdev, "rx");
        if (ret < 0) {
@@ -1314,8 +1321,6 @@ error1:
 
 static const struct platform_device_id spi_driver_ids[] = {
        { "rspi",       (kernel_ulong_t)&rspi_ops },
-       { "rspi-rz",    (kernel_ulong_t)&rspi_rz_ops },
-       { "qspi",       (kernel_ulong_t)&qspi_ops },
        {},
 };
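The SPCMD_SSLA() change works together with the core's new native-CS bookkeeping: when a device's chip select is a GPIO, the hardware still needs some native SSL line programmed, so the core records one that no device is wired to. A hedged sketch of that assumption — as of v5.6 the ffz() computation lives in the core's GPIO-descriptor setup in drivers/spi/spi.c:

	/* core side, when cs-gpios are parsed: remember a free native CS */
	ctlr->unused_native_cs = ffz(native_cs_mask);

	/* rspi then asserts either the real native CS or the unused one */
	rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
						: spi->chip_select);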
 
index 8f13473..1c11a00 100644 (file)
@@ -14,8 +14,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 #include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
@@ -55,7 +53,6 @@ struct sh_msiof_spi_priv {
        void *rx_dma_page;
        dma_addr_t tx_dma_addr;
        dma_addr_t rx_dma_addr;
-       unsigned short unused_ss;
        bool native_cs_inited;
        bool native_cs_high;
        bool slave_aborted;
@@ -63,140 +60,140 @@ struct sh_msiof_spi_priv {
 
 #define MAX_SS 3       /* Maximum number of native chip selects */
 
-#define TMDR1  0x00    /* Transmit Mode Register 1 */
-#define TMDR2  0x04    /* Transmit Mode Register 2 */
-#define TMDR3  0x08    /* Transmit Mode Register 3 */
-#define RMDR1  0x10    /* Receive Mode Register 1 */
-#define RMDR2  0x14    /* Receive Mode Register 2 */
-#define RMDR3  0x18    /* Receive Mode Register 3 */
-#define TSCR   0x20    /* Transmit Clock Select Register */
-#define RSCR   0x22    /* Receive Clock Select Register (SH, A1, APE6) */
-#define CTR    0x28    /* Control Register */
-#define FCTR   0x30    /* FIFO Control Register */
-#define STR    0x40    /* Status Register */
-#define IER    0x44    /* Interrupt Enable Register */
-#define TDR1   0x48    /* Transmit Control Data Register 1 (SH, A1) */
-#define TDR2   0x4c    /* Transmit Control Data Register 2 (SH, A1) */
-#define TFDR   0x50    /* Transmit FIFO Data Register */
-#define RDR1   0x58    /* Receive Control Data Register 1 (SH, A1) */
-#define RDR2   0x5c    /* Receive Control Data Register 2 (SH, A1) */
-#define RFDR   0x60    /* Receive FIFO Data Register */
-
-/* TMDR1 and RMDR1 */
-#define MDR1_TRMD         BIT(31)  /* Transfer Mode (1 = Master mode) */
-#define MDR1_SYNCMD_MASK   GENMASK(29, 28) /* SYNC Mode */
-#define MDR1_SYNCMD_SPI           (2 << 28)/*   Level mode/SPI */
-#define MDR1_SYNCMD_LR    (3 << 28)/*   L/R mode */
-#define MDR1_SYNCAC_SHIFT  25       /* Sync Polarity (1 = Active-low) */
-#define MDR1_BITLSB_SHIFT  24       /* MSB/LSB First (1 = LSB first) */
-#define MDR1_DTDL_SHIFT           20       /* Data Pin Bit Delay for MSIOF_SYNC */
-#define MDR1_SYNCDL_SHIFT  16       /* Frame Sync Signal Timing Delay */
-#define MDR1_FLD_MASK     GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
-#define MDR1_FLD_SHIFT    2
-#define MDR1_XXSTP        BIT(0)   /* Transmission/Reception Stop on FIFO */
-/* TMDR1 */
-#define TMDR1_PCON        BIT(30)  /* Transfer Signal Connection */
-#define TMDR1_SYNCCH_MASK  GENMASK(27, 26) /* Sync Signal Channel Select */
-#define TMDR1_SYNCCH_SHIFT 26       /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
-
-/* TMDR2 and RMDR2 */
-#define MDR2_BITLEN1(i)        (((i) - 1) << 24) /* Data Size (8-32 bits) */
-#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
-#define MDR2_GRPMASK1  BIT(0)      /* Group Output Mask 1 (SH, A1) */
-
-/* TSCR and RSCR */
-#define SCR_BRPS_MASK  GENMASK(12, 8) /* Prescaler Setting (1-32) */
-#define SCR_BRPS(i)    (((i) - 1) << 8)
-#define SCR_BRDV_MASK  GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
-#define SCR_BRDV_DIV_2 0
-#define SCR_BRDV_DIV_4 1
-#define SCR_BRDV_DIV_8 2
-#define SCR_BRDV_DIV_16        3
-#define SCR_BRDV_DIV_32        4
-#define SCR_BRDV_DIV_1 7
-
-/* CTR */
-#define CTR_TSCKIZ_MASK        GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
-#define CTR_TSCKIZ_SCK BIT(31)   /*   Disable SCK when TX disabled */
-#define CTR_TSCKIZ_POL_SHIFT 30   /*   Transmit Clock Polarity */
-#define CTR_RSCKIZ_MASK        GENMASK(29, 28) /* Receive Clock Polarity Select */
-#define CTR_RSCKIZ_SCK BIT(29)   /*   Must match CTR_TSCKIZ_SCK */
-#define CTR_RSCKIZ_POL_SHIFT 28   /*   Receive Clock Polarity */
-#define CTR_TEDG_SHIFT      27   /* Transmit Timing (1 = falling edge) */
-#define CTR_REDG_SHIFT      26   /* Receive Timing (1 = falling edge) */
-#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
-#define CTR_TXDIZ_LOW  (0 << 22) /*   0 */
-#define CTR_TXDIZ_HIGH (1 << 22) /*   1 */
-#define CTR_TXDIZ_HIZ  (2 << 22) /*   High-impedance */
-#define CTR_TSCKE      BIT(15)   /* Transmit Serial Clock Output Enable */
-#define CTR_TFSE       BIT(14)   /* Transmit Frame Sync Signal Output Enable */
-#define CTR_TXE                BIT(9)    /* Transmit Enable */
-#define CTR_RXE                BIT(8)    /* Receive Enable */
-#define CTR_TXRST      BIT(1)    /* Transmit Reset */
-#define CTR_RXRST      BIT(0)    /* Receive Reset */
-
-/* FCTR */
-#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define FCTR_TFWM_64   (0 << 29) /*  Transfer Request when 64 empty stages */
-#define FCTR_TFWM_32   (1 << 29) /*  Transfer Request when 32 empty stages */
-#define FCTR_TFWM_24   (2 << 29) /*  Transfer Request when 24 empty stages */
-#define FCTR_TFWM_16   (3 << 29) /*  Transfer Request when 16 empty stages */
-#define FCTR_TFWM_12   (4 << 29) /*  Transfer Request when 12 empty stages */
-#define FCTR_TFWM_8    (5 << 29) /*  Transfer Request when 8 empty stages */
-#define FCTR_TFWM_4    (6 << 29) /*  Transfer Request when 4 empty stages */
-#define FCTR_TFWM_1    (7 << 29) /*  Transfer Request when 1 empty stage */
-#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
-#define FCTR_TFUA_SHIFT        20
-#define FCTR_TFUA(i)   ((i) << FCTR_TFUA_SHIFT)
-#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
-#define FCTR_RFWM_1    (0 << 13) /*  Transfer Request when 1 valid stages */
-#define FCTR_RFWM_4    (1 << 13) /*  Transfer Request when 4 valid stages */
-#define FCTR_RFWM_8    (2 << 13) /*  Transfer Request when 8 valid stages */
-#define FCTR_RFWM_16   (3 << 13) /*  Transfer Request when 16 valid stages */
-#define FCTR_RFWM_32   (4 << 13) /*  Transfer Request when 32 valid stages */
-#define FCTR_RFWM_64   (5 << 13) /*  Transfer Request when 64 valid stages */
-#define FCTR_RFWM_128  (6 << 13) /*  Transfer Request when 128 valid stages */
-#define FCTR_RFWM_256  (7 << 13) /*  Transfer Request when 256 valid stages */
-#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
-#define FCTR_RFUA_SHIFT        4
-#define FCTR_RFUA(i)   ((i) << FCTR_RFUA_SHIFT)
-
-/* STR */
-#define STR_TFEMP      BIT(29) /* Transmit FIFO Empty */
-#define STR_TDREQ      BIT(28) /* Transmit Data Transfer Request */
-#define STR_TEOF       BIT(23) /* Frame Transmission End */
-#define STR_TFSERR     BIT(21) /* Transmit Frame Synchronization Error */
-#define STR_TFOVF      BIT(20) /* Transmit FIFO Overflow */
-#define STR_TFUDF      BIT(19) /* Transmit FIFO Underflow */
-#define STR_RFFUL      BIT(13) /* Receive FIFO Full */
-#define STR_RDREQ      BIT(12) /* Receive Data Transfer Request */
-#define STR_REOF       BIT(7)  /* Frame Reception End */
-#define STR_RFSERR     BIT(5)  /* Receive Frame Synchronization Error */
-#define STR_RFUDF      BIT(4)  /* Receive FIFO Underflow */
-#define STR_RFOVF      BIT(3)  /* Receive FIFO Overflow */
-
-/* IER */
-#define IER_TDMAE      BIT(31) /* Transmit Data DMA Transfer Req. Enable */
-#define IER_TFEMPE     BIT(29) /* Transmit FIFO Empty Enable */
-#define IER_TDREQE     BIT(28) /* Transmit Data Transfer Request Enable */
-#define IER_TEOFE      BIT(23) /* Frame Transmission End Enable */
-#define IER_TFSERRE    BIT(21) /* Transmit Frame Sync Error Enable */
-#define IER_TFOVFE     BIT(20) /* Transmit FIFO Overflow Enable */
-#define IER_TFUDFE     BIT(19) /* Transmit FIFO Underflow Enable */
-#define IER_RDMAE      BIT(15) /* Receive Data DMA Transfer Req. Enable */
-#define IER_RFFULE     BIT(13) /* Receive FIFO Full Enable */
-#define IER_RDREQE     BIT(12) /* Receive Data Transfer Request Enable */
-#define IER_REOFE      BIT(7)  /* Frame Reception End Enable */
-#define IER_RFSERRE    BIT(5)  /* Receive Frame Sync Error Enable */
-#define IER_RFUDFE     BIT(4)  /* Receive FIFO Underflow Enable */
-#define IER_RFOVFE     BIT(3)  /* Receive FIFO Overflow Enable */
+#define SITMDR1        0x00    /* Transmit Mode Register 1 */
+#define SITMDR2        0x04    /* Transmit Mode Register 2 */
+#define SITMDR3        0x08    /* Transmit Mode Register 3 */
+#define SIRMDR1        0x10    /* Receive Mode Register 1 */
+#define SIRMDR2        0x14    /* Receive Mode Register 2 */
+#define SIRMDR3        0x18    /* Receive Mode Register 3 */
+#define SITSCR 0x20    /* Transmit Clock Select Register */
+#define SIRSCR 0x22    /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR  0x28    /* Control Register */
+#define SIFCTR 0x30    /* FIFO Control Register */
+#define SISTR  0x40    /* Status Register */
+#define SIIER  0x44    /* Interrupt Enable Register */
+#define SITDR1 0x48    /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c    /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50    /* Transmit FIFO Data Register */
+#define SIRDR1 0x58    /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c    /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60    /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD            BIT(31)         /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD_MASK     GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_SPI      (2 << 28)       /*   Level mode/SPI */
+#define SIMDR1_SYNCMD_LR       (3 << 28)       /*   L/R mode */
+#define SIMDR1_SYNCAC_SHIFT    25              /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB_SHIFT    24              /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL_SHIFT      20              /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL_SHIFT    16              /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD_MASK                GENMASK(3, 2)   /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_FLD_SHIFT       2
+#define SIMDR1_XXSTP           BIT(0)          /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON           BIT(30)         /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH_MASK    GENMASK(27, 26) /* Sync Signal Channel Select */
+#define SITMDR1_SYNCCH_SHIFT   26              /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_BITLEN1(i)      (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1(i)       (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
+#define SIMDR2_GRPMASK1                BIT(0)          /* Group Output Mask 1 (SH, A1) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS_MASK                GENMASK(12, 8)  /* Prescaler Setting (1-32) */
+#define SISCR_BRPS(i)          (((i) - 1) << 8)
+#define SISCR_BRDV_MASK                GENMASK(2, 0)   /* Baud Rate Generator's Division Ratio */
+#define SISCR_BRDV_DIV_2       0
+#define SISCR_BRDV_DIV_4       1
+#define SISCR_BRDV_DIV_8       2
+#define SISCR_BRDV_DIV_16      3
+#define SISCR_BRDV_DIV_32      4
+#define SISCR_BRDV_DIV_1       7
+
+/* SICTR */
+#define SICTR_TSCKIZ_MASK      GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK       BIT(31)         /*   Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL_SHIFT 30              /*   Transmit Clock Polarity */
+#define SICTR_RSCKIZ_MASK      GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK       BIT(29)         /*   Must match CTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL_SHIFT 28              /*   Receive Clock Polarity */
+#define SICTR_TEDG_SHIFT       27              /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG_SHIFT       26              /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ_MASK       GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW                (0 << 22)       /*   0 */
+#define SICTR_TXDIZ_HIGH       (1 << 22)       /*   1 */
+#define SICTR_TXDIZ_HIZ                (2 << 22)       /*   High-impedance */
+#define SICTR_TSCKE            BIT(15)         /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE             BIT(14)         /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE              BIT(9)          /* Transmit Enable */
+#define SICTR_RXE              BIT(8)          /* Receive Enable */
+#define SICTR_TXRST            BIT(1)          /* Transmit Reset */
+#define SICTR_RXRST            BIT(0)          /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM_MASK       GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64         (0 << 29)       /*  Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32         (1 << 29)       /*  Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24         (2 << 29)       /*  Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16         (3 << 29)       /*  Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12         (4 << 29)       /*  Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8          (5 << 29)       /*  Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4          (6 << 29)       /*  Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1          (7 << 29)       /*  Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA_MASK       GENMASK(26, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_TFUA_SHIFT      20
+#define SIFCTR_TFUA(i)         ((i) << SIFCTR_TFUA_SHIFT)
+#define SIFCTR_RFWM_MASK       GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1          (0 << 13)       /*  Transfer Request when 1 valid stages */
+#define SIFCTR_RFWM_4          (1 << 13)       /*  Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8          (2 << 13)       /*  Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16         (3 << 13)       /*  Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32         (4 << 13)       /*  Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64         (5 << 13)       /*  Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128                (6 << 13)       /*  Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256                (7 << 13)       /*  Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA_MASK       GENMASK(12, 4)  /* Receive FIFO Usable Area (0x40 = full) */
+#define SIFCTR_RFUA_SHIFT      4
+#define SIFCTR_RFUA(i)         ((i) << SIFCTR_RFUA_SHIFT)
+
+/* SISTR */
+#define SISTR_TFEMP            BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ            BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF             BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR           BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF            BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF            BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL            BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ            BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF             BIT(7)  /* Frame Reception End */
+#define SISTR_RFSERR           BIT(5)  /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF            BIT(4)  /* Receive FIFO Underflow */
+#define SISTR_RFOVF            BIT(3)  /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE            BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE           BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE           BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE            BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE          BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE           BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE           BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE            BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE           BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE           BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE            BIT(7)  /* Frame Reception End Enable */
+#define SIIER_RFSERRE          BIT(5)  /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE           BIT(4)  /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE           BIT(3)  /* Receive FIFO Overflow Enable */
 
 
 static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
 {
        switch (reg_offs) {
-       case TSCR:
-       case RSCR:
+       case SITSCR:
+       case SIRSCR:
                return ioread16(p->mapbase + reg_offs);
        default:
                return ioread32(p->mapbase + reg_offs);
@@ -207,8 +204,8 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
                           u32 value)
 {
        switch (reg_offs) {
-       case TSCR:
-       case RSCR:
+       case SITSCR:
+       case SIRSCR:
                iowrite16(value, p->mapbase + reg_offs);
                break;
        default:
@@ -223,12 +220,12 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
        u32 mask = clr | set;
        u32 data;
 
-       data = sh_msiof_read(p, CTR);
+       data = sh_msiof_read(p, SICTR);
        data &= ~clr;
        data |= set;
-       sh_msiof_write(p, CTR, data);
+       sh_msiof_write(p, SICTR, data);
 
-       return readl_poll_timeout_atomic(p->mapbase + CTR, data,
+       return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
                                         (data & mask) == set, 1, 100);
 }
 
@@ -237,7 +234,7 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
        struct sh_msiof_spi_priv *p = data;
 
        /* just disable the interrupt and wake up */
-       sh_msiof_write(p, IER, 0);
+       sh_msiof_write(p, SIIER, 0);
        complete(&p->done);
 
        return IRQ_HANDLED;
@@ -245,20 +242,20 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
 
 static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
 {
-       u32 mask = CTR_TXRST | CTR_RXRST;
+       u32 mask = SICTR_TXRST | SICTR_RXRST;
        u32 data;
 
-       data = sh_msiof_read(p, CTR);
+       data = sh_msiof_read(p, SICTR);
        data |= mask;
-       sh_msiof_write(p, CTR, data);
+       sh_msiof_write(p, SICTR, data);
 
-       readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
+       readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
                                  100);
 }
 
 static const u32 sh_msiof_spi_div_array[] = {
-       SCR_BRDV_DIV_1, SCR_BRDV_DIV_2,  SCR_BRDV_DIV_4,
-       SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
+       SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
+       SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
 };
 
 static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
@@ -276,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 
        div = DIV_ROUND_UP(parent_rate, spi_hz);
        if (div <= 1024) {
-               /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+               /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
                if (!div_pow && div <= 32 && div > 2)
                        div_pow = 1;
 
@@ -295,10 +292,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
                brps = 32;
        }
 
-       scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
-       sh_msiof_write(p, TSCR, scr);
+       scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
+       sh_msiof_write(p, SITSCR, scr);
        if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
-               sh_msiof_write(p, RSCR, scr);
+               sh_msiof_write(p, SIRSCR, scr);
 }
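
The divider selection above splits the requested ratio between a power-of-two BRDV pre-divider and a linear BRPS prescaler. A hedged, runnable sketch of the arithmetic, assuming the effective divisor is brps << div_pow with brps in 1..32 (the full selection loop is elided from this hunk):

/* Hedged sketch, not the driver function: assumes the effective SPI
 * divisor is brps << div_pow, with SISCR_BRDV_DIV_1..DIV_32 selecting
 * div_pow = 0..5 and SISCR_BRPS() taking brps = 1..32. */
#include <stdio.h>

int main(void)
{
        unsigned long parent_rate = 133333333, spi_hz = 10000000;
        unsigned long div = (parent_rate + spi_hz - 1) / spi_hz;  /* 14 */
        unsigned int div_pow = 0, brps;

        /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
        if (!div_pow && div <= 32 && div > 2)
                div_pow = 1;

        brps = (div + (1UL << div_pow) - 1) >> div_pow;  /* ceil: 7 */
        printf("div_pow=%u brps=%u -> %lu Hz\n", div_pow, brps,
               parent_rate / (brps << div_pow));  /* ~9.52 MHz */
        return 0;
}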
 
 static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
@@ -337,8 +334,8 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
                return 0;
        }
 
-       val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
-       val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
+       val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
+       val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
 
        return val;
 }
@@ -357,54 +354,54 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
         *    1    0         11     11    0    0
         *    1    1         11     11    1    1
         */
-       tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
-       tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
-       tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+       tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
+       tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
+       tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
        tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
        if (spi_controller_is_slave(p->ctlr)) {
-               sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
+               sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
        } else {
-               sh_msiof_write(p, TMDR1,
-                              tmp | MDR1_TRMD | TMDR1_PCON |
-                              (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+               sh_msiof_write(p, SITMDR1,
+                              tmp | SIMDR1_TRMD | SITMDR1_PCON |
+                              (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
        }
        if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
                /* These bits are reserved if RX needs TX */
                tmp &= ~0x0000ffff;
        }
-       sh_msiof_write(p, RMDR1, tmp);
+       sh_msiof_write(p, SIRMDR1, tmp);
 
        tmp = 0;
-       tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
-       tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
+       tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
+       tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
 
        edge = cpol ^ !cpha;
 
-       tmp |= edge << CTR_TEDG_SHIFT;
-       tmp |= edge << CTR_REDG_SHIFT;
-       tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
-       sh_msiof_write(p, CTR, tmp);
+       tmp |= edge << SICTR_TEDG_SHIFT;
+       tmp |= edge << SICTR_REDG_SHIFT;
+       tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
+       sh_msiof_write(p, SICTR, tmp);
 }
 
 static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
                                       const void *tx_buf, void *rx_buf,
                                       u32 bits, u32 words)
 {
-       u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
+       u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
 
        if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
-               sh_msiof_write(p, TMDR2, dr2);
+               sh_msiof_write(p, SITMDR2, dr2);
        else
-               sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
+               sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
 
        if (rx_buf)
-               sh_msiof_write(p, RMDR2, dr2);
+               sh_msiof_write(p, SIRMDR2, dr2);
 }
 
 static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 {
-       sh_msiof_write(p, STR,
-                      sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
+       sh_msiof_write(p, SISTR,
+                      sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
 }
 
 static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -414,7 +411,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, buf_8[k] << fs);
+               sh_msiof_write(p, SITFDR, buf_8[k] << fs);
 }
 
 static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
@@ -424,7 +421,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, buf_16[k] << fs);
+               sh_msiof_write(p, SITFDR, buf_16[k] << fs);
 }
 
 static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
@@ -434,7 +431,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
+               sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
 }
 
 static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
@@ -444,7 +441,7 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, buf_32[k] << fs);
+               sh_msiof_write(p, SITFDR, buf_32[k] << fs);
 }
 
 static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
@@ -454,7 +451,7 @@ static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
+               sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
 }
 
 static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
@@ -464,7 +461,7 @@ static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
+               sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
 }
 
 static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
@@ -474,7 +471,7 @@ static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
+               sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
 }
 
 static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
@@ -484,7 +481,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
+               buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
 }
 
 static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
@@ -494,7 +491,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
+               buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
 }
 
 static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
@@ -504,7 +501,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
+               put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
 }
 
 static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
@@ -514,7 +511,7 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
+               buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
 }
 
 static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
@@ -524,7 +521,7 @@ static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
+               put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
 }
 
 static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
@@ -534,7 +531,7 @@ static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
+               buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
 }
 
 static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
@@ -544,7 +541,7 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
+               put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
 }
 
 static int sh_msiof_spi_setup(struct spi_device *spi)
@@ -561,17 +558,17 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
                return 0;
 
        /* Configure native chip select mode/polarity early */
-       clr = MDR1_SYNCMD_MASK;
-       set = MDR1_SYNCMD_SPI;
+       clr = SIMDR1_SYNCMD_MASK;
+       set = SIMDR1_SYNCMD_SPI;
        if (spi->mode & SPI_CS_HIGH)
-               clr |= BIT(MDR1_SYNCAC_SHIFT);
+               clr |= BIT(SIMDR1_SYNCAC_SHIFT);
        else
-               set |= BIT(MDR1_SYNCAC_SHIFT);
+               set |= BIT(SIMDR1_SYNCAC_SHIFT);
        pm_runtime_get_sync(&p->pdev->dev);
-       tmp = sh_msiof_read(p, TMDR1) & ~clr;
-       sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
-       tmp = sh_msiof_read(p, RMDR1) & ~clr;
-       sh_msiof_write(p, RMDR1, tmp | set);
+       tmp = sh_msiof_read(p, SITMDR1) & ~clr;
+       sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
+       tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
+       sh_msiof_write(p, SIRMDR1, tmp | set);
        pm_runtime_put(&p->pdev->dev);
        p->native_cs_high = spi->mode & SPI_CS_HIGH;
        p->native_cs_inited = true;
@@ -587,7 +584,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
 
        /* Configure pins before asserting CS */
        if (spi->cs_gpiod) {
-               ss = p->unused_ss;
+               ss = ctlr->unused_native_cs;
                cs_high = p->native_cs_high;
        } else {
                ss = spi->chip_select;
@@ -607,15 +604,15 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
 
        /* setup clock and rx/tx signals */
        if (!slave)
-               ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+               ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
        if (rx_buf && !ret)
-               ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
+               ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
        if (!ret)
-               ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
+               ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);
 
        /* start by setting frame bit */
        if (!ret && !slave)
-               ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
+               ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);
 
        return ret;
 }
@@ -627,13 +624,13 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
 
        /* shut down frame, rx/tx and clock signals */
        if (!slave)
-               ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+               ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
        if (!ret)
-               ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
+               ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
        if (rx_buf && !ret)
-               ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
+               ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
        if (!ret && !slave)
-               ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
+               ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);
 
        return ret;
 }
@@ -688,11 +685,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
        fifo_shift = 32 - bits;
 
        /* default FIFO watermarks for PIO */
-       sh_msiof_write(p, FCTR, 0);
+       sh_msiof_write(p, SIFCTR, 0);
 
        /* setup msiof transfer mode registers */
        sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
-       sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
+       sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
 
        /* write tx fifo */
        if (tx_buf)
@@ -731,7 +728,7 @@ stop_reset:
        sh_msiof_reset_str(p);
        sh_msiof_spi_stop(p, rx_buf);
 stop_ier:
-       sh_msiof_write(p, IER, 0);
+       sh_msiof_write(p, SIIER, 0);
        return ret;
 }
 
@@ -750,7 +747,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 
        /* First prepare and submit the DMA request(s), as this may fail */
        if (rx) {
-               ier_bits |= IER_RDREQE | IER_RDMAE;
+               ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
                desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
                                        p->rx_dma_addr, len, DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -765,7 +762,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
        }
 
        if (tx) {
-               ier_bits |= IER_TDREQE | IER_TDMAE;
+               ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
                dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
                                           p->tx_dma_addr, len, DMA_TO_DEVICE);
                desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
@@ -786,12 +783,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
        }
 
        /* 1 stage FIFO watermarks for DMA */
-       sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
+       sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
 
        /* setup msiof transfer mode registers (32-bit words) */
        sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
 
-       sh_msiof_write(p, IER, ier_bits);
+       sh_msiof_write(p, SIIER, ier_bits);
 
        reinit_completion(&p->done);
        if (tx)
@@ -823,10 +820,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
                if (ret)
                        goto stop_reset;
 
-               sh_msiof_write(p, IER, 0);
+               sh_msiof_write(p, SIIER, 0);
        } else {
                /* wait for tx fifo to be emptied */
-               sh_msiof_write(p, IER, IER_TEOFE);
+               sh_msiof_write(p, SIIER, SIIER_TEOFE);
                ret = sh_msiof_wait_for_completion(p, &p->done);
                if (ret)
                        goto stop_reset;
@@ -856,7 +853,7 @@ stop_dma:
 no_dma_tx:
        if (rx)
                dmaengine_terminate_all(p->ctlr->dma_rx);
-       sh_msiof_write(p, IER, 0);
+       sh_msiof_write(p, SIIER, 0);
        return ret;
 }
 
@@ -1124,46 +1121,6 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
 }
 #endif
 
-static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
-{
-       struct device *dev = &p->pdev->dev;
-       unsigned int used_ss_mask = 0;
-       unsigned int cs_gpios = 0;
-       unsigned int num_cs, i;
-       int ret;
-
-       ret = gpiod_count(dev, "cs");
-       if (ret <= 0)
-               return 0;
-
-       num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
-       for (i = 0; i < num_cs; i++) {
-               struct gpio_desc *gpiod;
-
-               gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
-               if (!IS_ERR(gpiod)) {
-                       devm_gpiod_put(dev, gpiod);
-                       cs_gpios++;
-                       continue;
-               }
-
-               if (PTR_ERR(gpiod) != -ENOENT)
-                       return PTR_ERR(gpiod);
-
-               if (i >= MAX_SS) {
-                       dev_err(dev, "Invalid native chip select %d\n", i);
-                       return -EINVAL;
-               }
-               used_ss_mask |= BIT(i);
-       }
-       p->unused_ss = ffz(used_ss_mask);
-       if (cs_gpios && p->unused_ss >= MAX_SS) {
-               dev_err(dev, "No unused native chip select available\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
 static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
        enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
 {
@@ -1232,12 +1189,12 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
 
        ctlr = p->ctlr;
        ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
-                                                dma_tx_id, res->start + TFDR);
+                                                dma_tx_id, res->start + SITFDR);
        if (!ctlr->dma_tx)
                return -ENODEV;
 
        ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
-                                                dma_rx_id, res->start + RFDR);
+                                                dma_rx_id, res->start + SIRFDR);
        if (!ctlr->dma_rx)
                goto free_tx_chan;
 
@@ -1373,17 +1330,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
        if (p->info->rx_fifo_override)
                p->rx_fifo_size = p->info->rx_fifo_override;
 
-       /* Setup GPIO chip selects */
-       ctlr->num_chipselect = p->info->num_chipselect;
-       ret = sh_msiof_get_cs_gpios(p);
-       if (ret)
-               goto err1;
-
        /* init controller code */
        ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
        ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
        ctlr->flags = chipdata->ctlr_flags;
        ctlr->bus_num = pdev->id;
+       ctlr->num_chipselect = p->info->num_chipselect;
        ctlr->dev.of_node = pdev->dev.of_node;
        ctlr->setup = sh_msiof_spi_setup;
        ctlr->prepare_message = sh_msiof_prepare_message;
@@ -1392,6 +1344,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
        ctlr->auto_runtime_pm = true;
        ctlr->transfer_one = sh_msiof_transfer_one;
        ctlr->use_gpio_descriptors = true;
+       ctlr->max_native_cs = MAX_SS;
 
        ret = sh_msiof_request_dma(p);
        if (ret < 0)
index e1e6391..8419e67 100644 (file)
@@ -1126,16 +1126,16 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
        sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
 
        /* request DMA channels */
-       sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
-       if (!sspi->rx_chan) {
+       sspi->rx_chan = dma_request_chan(&pdev->dev, "rx");
+       if (IS_ERR(sspi->rx_chan)) {
                dev_err(&pdev->dev, "can not allocate rx dma channel\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(sspi->rx_chan);
                goto free_master;
        }
-       sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
-       if (!sspi->tx_chan) {
+       sspi->tx_chan = dma_request_chan(&pdev->dev, "tx");
+       if (IS_ERR(sspi->tx_chan)) {
                dev_err(&pdev->dev, "can not allocate tx dma channel\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(sspi->tx_chan);
                goto free_rx_dma;
        }
 
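This hunk is one instance of the dma_request_slave_channel() to dma_request_chan() conversion that recurs through this pull: the new API returns ERR_PTR() codes, so probe paths can propagate the real error (notably -EPROBE_DEFER) instead of a blanket -ENODEV. A minimal sketch of the mandatory-channel variant; the example_* name is hypothetical:

#include <linux/dmaengine.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical helper, not from the patch: request a mandatory DMA
 * channel and propagate the real error code, -EPROBE_DEFER included. */
static int example_request_mandatory_chan(struct device *dev,
                                          struct dma_chan **chan,
                                          const char *name)
{
        *chan = dma_request_chan(dev, name);
        if (IS_ERR(*chan)) {
                int ret = PTR_ERR(*chan);

                *chan = NULL;
                dev_err(dev, "can not allocate %s dma channel\n", name);
                return ret;
        }
        return 0;
}
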
index 4e72692..4ef569b 100644 (file)
@@ -470,10 +470,11 @@ static int stm32_qspi_setup(struct spi_device *spi)
        return 0;
 }
 
-static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
+static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
 {
        struct dma_slave_config dma_cfg;
        struct device *dev = qspi->dev;
+       int ret = 0;
 
        memset(&dma_cfg, 0, sizeof(dma_cfg));
 
@@ -484,8 +485,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
        dma_cfg.src_maxburst = 4;
        dma_cfg.dst_maxburst = 4;
 
-       qspi->dma_chrx = dma_request_slave_channel(dev, "rx");
-       if (qspi->dma_chrx) {
+       qspi->dma_chrx = dma_request_chan(dev, "rx");
+       if (IS_ERR(qspi->dma_chrx)) {
+               ret = PTR_ERR(qspi->dma_chrx);
+               qspi->dma_chrx = NULL;
+               if (ret == -EPROBE_DEFER)
+                       goto out;
+       } else {
                if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
                        dev_err(dev, "dma rx config failed\n");
                        dma_release_channel(qspi->dma_chrx);
@@ -493,8 +499,11 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
                }
        }
 
-       qspi->dma_chtx = dma_request_slave_channel(dev, "tx");
-       if (qspi->dma_chtx) {
+       qspi->dma_chtx = dma_request_chan(dev, "tx");
+       if (IS_ERR(qspi->dma_chtx)) {
+               ret = PTR_ERR(qspi->dma_chtx);
+               qspi->dma_chtx = NULL;
+       } else {
                if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
                        dev_err(dev, "dma tx config failed\n");
                        dma_release_channel(qspi->dma_chtx);
@@ -502,7 +511,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
                }
        }
 
+out:
        init_completion(&qspi->dma_completion);
+
+       if (ret != -EPROBE_DEFER)
+               ret = 0;
+
+       return ret;
 }
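
stm32_qspi_dma_setup() above applies the optional-channel variant of the same conversion: only -EPROBE_DEFER is fatal, and any other failure leaves a NULL channel so the driver falls back to PIO. A hedged sketch of that policy; the example_* name is hypothetical:

#include <linux/dmaengine.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical helper, not from the patch: request an optional DMA
 * channel; only probe deferral is fatal, anything else means PIO. */
static int example_request_optional_chan(struct device *dev,
                                         struct dma_chan **chan,
                                         const char *name)
{
        *chan = dma_request_chan(dev, name);
        if (IS_ERR(*chan)) {
                int ret = PTR_ERR(*chan);

                *chan = NULL;   /* fall back to PIO */
                return ret == -EPROBE_DEFER ? ret : 0;
        }
        return 0;
}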
 
 static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
@@ -608,7 +623,10 @@ static int stm32_qspi_probe(struct platform_device *pdev)
 
        qspi->dev = dev;
        platform_set_drvdata(pdev, qspi);
-       stm32_qspi_dma_setup(qspi);
+       ret = stm32_qspi_dma_setup(qspi);
+       if (ret)
+               goto err;
+
        mutex_init(&qspi->lock);
 
        ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
index b222ce8..e041f9c 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
-#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
@@ -973,29 +972,6 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-/**
- * stm32_spi_setup - setup device chip select
- */
-static int stm32_spi_setup(struct spi_device *spi_dev)
-{
-       int ret = 0;
-
-       if (!gpio_is_valid(spi_dev->cs_gpio)) {
-               dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
-                       spi_dev->cs_gpio);
-               return -EINVAL;
-       }
-
-       dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
-               spi_dev->cs_gpio,
-               (spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");
-
-       ret = gpio_direction_output(spi_dev->cs_gpio,
-                                   !(spi_dev->mode & SPI_CS_HIGH));
-
-       return ret;
-}
-
 /**
  * stm32_spi_prepare_msg - set up the controller to transfer a single message
  */
@@ -1810,7 +1786,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct stm32_spi *spi;
        struct resource *res;
-       int i, ret;
+       int ret;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
        if (!master) {
@@ -1898,22 +1874,34 @@ static int stm32_spi_probe(struct platform_device *pdev)
        master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
        master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
        master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
-       master->setup = stm32_spi_setup;
+       master->use_gpio_descriptors = true;
        master->prepare_message = stm32_spi_prepare_msg;
        master->transfer_one = stm32_spi_transfer_one;
        master->unprepare_message = stm32_spi_unprepare_msg;
 
-       spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
-       if (!spi->dma_tx)
+       spi->dma_tx = dma_request_chan(spi->dev, "tx");
+       if (IS_ERR(spi->dma_tx)) {
+               ret = PTR_ERR(spi->dma_tx);
+               spi->dma_tx = NULL;
+               if (ret == -EPROBE_DEFER)
+                       goto err_clk_disable;
+
                dev_warn(&pdev->dev, "failed to request tx dma channel\n");
-       else
+       } else {
                master->dma_tx = spi->dma_tx;
+       }
+
+       spi->dma_rx = dma_request_chan(spi->dev, "rx");
+       if (IS_ERR(spi->dma_rx)) {
+               ret = PTR_ERR(spi->dma_rx);
+               spi->dma_rx = NULL;
+               if (ret == -EPROBE_DEFER)
+                       goto err_dma_release;
 
-       spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
-       if (!spi->dma_rx)
                dev_warn(&pdev->dev, "failed to request rx dma channel\n");
-       else
+       } else {
                master->dma_rx = spi->dma_rx;
+       }
 
        if (spi->dma_tx || spi->dma_rx)
                master->can_dma = stm32_spi_can_dma;
@@ -1925,43 +1913,26 @@ static int stm32_spi_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "spi master registration failed: %d\n",
                        ret);
-               goto err_dma_release;
+               goto err_pm_disable;
        }
 
-       if (!master->cs_gpios) {
+       if (!master->cs_gpiods) {
                dev_err(&pdev->dev, "no CS gpios available\n");
                ret = -EINVAL;
-               goto err_dma_release;
-       }
-
-       for (i = 0; i < master->num_chipselect; i++) {
-               if (!gpio_is_valid(master->cs_gpios[i])) {
-                       dev_err(&pdev->dev, "%i is not a valid gpio\n",
-                               master->cs_gpios[i]);
-                       ret = -EINVAL;
-                       goto err_dma_release;
-               }
-
-               ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
-                                       DRIVER_NAME);
-               if (ret) {
-                       dev_err(&pdev->dev, "can't get CS gpio %i\n",
-                               master->cs_gpios[i]);
-                       goto err_dma_release;
-               }
+               goto err_pm_disable;
        }
 
        dev_info(&pdev->dev, "driver initialized\n");
 
        return 0;
 
+err_pm_disable:
+       pm_runtime_disable(&pdev->dev);
 err_dma_release:
        if (spi->dma_tx)
                dma_release_channel(spi->dma_tx);
        if (spi->dma_rx)
                dma_release_channel(spi->dma_rx);
-
-       pm_runtime_disable(&pdev->dev);
 err_clk_disable:
        clk_disable_unprepare(spi->clk);
 err_master_put:
index fc40ab1..83edabd 100644 (file)
@@ -269,10 +269,10 @@ static unsigned tegra_spi_calculate_curr_xfer_param(
 
        if ((bits_per_word == 8 || bits_per_word == 16 ||
             bits_per_word == 32) && t->len > 3) {
-               tspi->is_packed = 1;
+               tspi->is_packed = true;
                tspi->words_per_32bit = 32/bits_per_word;
        } else {
-               tspi->is_packed = 0;
+               tspi->is_packed = false;
                tspi->words_per_32bit = 1;
        }
 
index 66dcb61..366a3e5 100644 (file)
@@ -80,8 +80,6 @@ struct ti_qspi {
 
 #define QSPI_COMPLETION_TIMEOUT                msecs_to_jiffies(2000)
 
-#define QSPI_FCLK                      192000000
-
 /* Clock Control */
 #define QSPI_CLK_EN                    (1 << 31)
 #define QSPI_CLK_DIV_MAX               0xffff
@@ -316,6 +314,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
 {
        int wlen;
        unsigned int cmd;
+       u32 rx;
+       u8 rxlen, rx_wlen;
        u8 *rxbuf;
 
        rxbuf = t->rx_buf;
@@ -332,20 +332,67 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
                break;
        }
        wlen = t->bits_per_word >> 3;   /* in bytes */
+       rx_wlen = wlen;
 
        while (count) {
                dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
                if (qspi_is_busy(qspi))
                        return -EBUSY;
 
+               switch (wlen) {
+               case 1:
+                       /*
+                        * Optimize 8-bit word transfers, as used by
+                        * SPI flash devices.
+                        */
+                       if (count >= QSPI_WLEN_MAX_BYTES)
+                               rxlen = QSPI_WLEN_MAX_BYTES;
+                       else
+                               rxlen = min(count, 4);
+                       rx_wlen = rxlen << 3;
+                       cmd &= ~QSPI_WLEN_MASK;
+                       cmd |= QSPI_WLEN(rx_wlen);
+                       break;
+               default:
+                       rxlen = wlen;
+                       break;
+               }
+
                ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
                if (ti_qspi_poll_wc(qspi)) {
                        dev_err(qspi->dev, "read timed out\n");
                        return -ETIMEDOUT;
                }
+
                switch (wlen) {
                case 1:
-                       *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
+                       /*
+                        * Optimize 8-bit word transfers, as used by
+                        * SPI flash devices.
+                        */
+                       if (count >= QSPI_WLEN_MAX_BYTES) {
+                               u32 *rxp = (u32 *) rxbuf;
+                               rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
+                               *rxp++ = be32_to_cpu(rx);
+                               rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
+                               *rxp++ = be32_to_cpu(rx);
+                               rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
+                               *rxp++ = be32_to_cpu(rx);
+                               rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+                               *rxp++ = be32_to_cpu(rx);
+                       } else {
+                               u8 *rxp = rxbuf;
+                               rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+                               if (rx_wlen >= 8)
+                                       *rxp++ = rx >> (rx_wlen - 8);
+                               if (rx_wlen >= 16)
+                                       *rxp++ = rx >> (rx_wlen - 16);
+                               if (rx_wlen >= 24)
+                                       *rxp++ = rx >> (rx_wlen - 24);
+                               if (rx_wlen >= 32)
+                                       *rxp++ = rx;
+                       }
                        break;
                case 2:
                        *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
@@ -354,8 +401,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
                        *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
                        break;
                }
-               rxbuf += wlen;
-               count -= wlen;
+               rxbuf += rxlen;
+               count -= rxlen;
        }
 
        return 0;
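
The shift ladder in the 8-bit path above peels bytes MSB-first out of the 32-bit data register. A runnable worked example for a 3-byte tail (rx_wlen == 24):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t rx = 0x00aabbcc;     /* register read, rx_wlen == 24 */
        unsigned int rx_wlen = 24;
        uint8_t out[4], *rxp = out;

        if (rx_wlen >= 8)
                *rxp++ = rx >> (rx_wlen - 8);    /* 0xaa */
        if (rx_wlen >= 16)
                *rxp++ = rx >> (rx_wlen - 16);   /* 0xbb */
        if (rx_wlen >= 24)
                *rxp++ = rx >> (rx_wlen - 24);   /* 0xcc */
        if (rx_wlen >= 32)
                *rxp++ = rx;                     /* not taken here */

        printf("%02x %02x %02x\n", out[0], out[1], out[2]);
        return 0;
}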
@@ -527,6 +574,35 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
                      QSPI_SPI_SETUP_REG(spi->chip_select));
 }
 
+static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+       struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+       size_t max_len;
+
+       if (op->data.dir == SPI_MEM_DATA_IN) {
+               if (op->addr.val < qspi->mmap_size) {
+                       /* Limit MMIO to the mmapped region */
+                       if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
+                               max_len = qspi->mmap_size - op->addr.val;
+                               op->data.nbytes = min((size_t) op->data.nbytes,
+                                                     max_len);
+                       }
+               } else {
+                       /*
+                        * Use fallback mode (SW-generated transfers) above the
+                        * mmapped region.
+                        * Adjust size to comply with the QSPI max frame length.
+                        */
+                       max_len = QSPI_FRAME;
+                       max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
+                       op->data.nbytes = min((size_t) op->data.nbytes,
+                                             max_len);
+               }
+       }
+
+       return 0;
+}
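
With made-up numbers, the clamping in ti_qspi_adjust_op_size() works out as follows: an 8 MiB mmap window (hypothetical size) and a read crossing its end gets trimmed to what still fits:

#include <stdio.h>

int main(void)
{
        size_t mmap_size = 0x800000;             /* hypothetical 8 MiB window */
        size_t addr = 0x7ff000, nbytes = 0x2000; /* read crosses the window end */

        if (addr < mmap_size && addr + nbytes > mmap_size)
                nbytes = mmap_size - addr;       /* trimmed to 0x1000 */

        printf("nbytes clamped to 0x%zx\n", nbytes);
        return 0;
}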
+
 static int ti_qspi_exec_mem_op(struct spi_mem *mem,
                               const struct spi_mem_op *op)
 {
@@ -577,6 +653,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
 
 static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
        .exec_op = ti_qspi_exec_mem_op,
+       .adjust_op_size = ti_qspi_adjust_op_size,
 };
 
 static int ti_qspi_start_transfer_one(struct spi_master *master,
index 223353f..d7ea6af 100644 (file)
@@ -863,7 +863,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
        /* Set Tx DMA */
        param = &dma->param_tx;
        param->dma_dev = &dma_dev->dev;
-       param->chan_id = data->ch * 2; /* Tx = 0, 2 */;
+       param->chan_id = data->ch * 2; /* Tx = 0, 2 */
        param->tx_reg = data->io_base_addr + PCH_SPDWR;
        param->width = width;
        chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -878,7 +878,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
        /* Set Rx DMA */
        param = &dma->param_rx;
        param->dma_dev = &dma_dev->dev;
-       param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */;
+       param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
        param->rx_reg = data->io_base_addr + PCH_SPDRR;
        param->width = width;
        chan = dma_request_channel(mask, pch_spi_filter, param);
index ce9b301..0fa5097 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -23,6 +24,7 @@
 
 struct uniphier_spi_priv {
        void __iomem *base;
+       dma_addr_t base_dma_addr;
        struct clk *clk;
        struct spi_master *master;
        struct completion xfer_done;
@@ -32,6 +34,7 @@ struct uniphier_spi_priv {
        unsigned int rx_bytes;
        const u8 *tx_buf;
        u8 *rx_buf;
+       atomic_t dma_busy;
 
        bool is_save_param;
        u8 bits_per_word;
@@ -61,11 +64,16 @@ struct uniphier_spi_priv {
 #define   SSI_FPS_FSTRT                BIT(14)
 
 #define SSI_SR                 0x14
+#define   SSI_SR_BUSY          BIT(7)
 #define   SSI_SR_RNE           BIT(0)
 
 #define SSI_IE                 0x18
+#define   SSI_IE_TCIE          BIT(4)
 #define   SSI_IE_RCIE          BIT(3)
+#define   SSI_IE_TXRE          BIT(2)
+#define   SSI_IE_RXRE          BIT(1)
 #define   SSI_IE_RORIE         BIT(0)
+#define   SSI_IE_ALL_MASK      GENMASK(4, 0)
 
 #define SSI_IS                 0x1c
 #define   SSI_IS_RXRS          BIT(9)
@@ -87,15 +95,19 @@ struct uniphier_spi_priv {
 #define SSI_RXDR               0x24
 
 #define SSI_FIFO_DEPTH         8U
+#define SSI_FIFO_BURST_NUM     1
+
+#define SSI_DMA_RX_BUSY                BIT(1)
+#define SSI_DMA_TX_BUSY                BIT(0)
 
 static inline unsigned int bytes_per_word(unsigned int bits)
 {
        return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
 }
 
-static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
+                                          u32 mask)
 {
-       struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
        u32 val;
 
        val = readl(priv->base + SSI_IE);
@@ -103,9 +115,9 @@ static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
        writel(val, priv->base + SSI_IE);
 }
 
-static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
+                                           u32 mask)
 {
-       struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
        u32 val;
 
        val = readl(priv->base + SSI_IE);
@@ -334,6 +346,128 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
        writel(val, priv->base + SSI_FPS);
 }
 
+static bool uniphier_spi_can_dma(struct spi_master *master,
+                                struct spi_device *spi,
+                                struct spi_transfer *t)
+{
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+       if ((!master->dma_tx && !master->dma_rx)
+           || (!master->dma_tx && t->tx_buf)
+           || (!master->dma_rx && t->rx_buf))
+               return false;
+
+       return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
+}
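
uniphier_spi_can_dma() above elects DMA only when the transfer does not fit in the 8-entry FIFO. A quick check of the threshold arithmetic, for 20 bytes at 16 bits per word:

#include <stdio.h>

#define SSI_FIFO_DEPTH  8U
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int len = 20, bpw = 2;  /* 20 bytes at 16 bits per word */
        unsigned int words = DIV_ROUND_UP(len, bpw);

        printf("%u FIFO words -> %s\n", words,
               words > SSI_FIFO_DEPTH ? "DMA" : "PIO");  /* 10 -> DMA */
        return 0;
}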
+
+static void uniphier_spi_dma_rxcb(void *data)
+{
+       struct spi_master *master = data;
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+       uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
+
+       if (!(state & SSI_DMA_TX_BUSY))
+               spi_finalize_current_transfer(master);
+}
+
+static void uniphier_spi_dma_txcb(void *data)
+{
+       struct spi_master *master = data;
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+       uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
+
+       if (!(state & SSI_DMA_RX_BUSY))
+               spi_finalize_current_transfer(master);
+}
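
The two callbacks above rely on atomic_fetch_andnot(): each direction clears its own BUSY bit, and whichever callback observes the other bit already clear finalizes the transfer. A userspace sketch of the same idea with C11 atomics, where atomic_fetch_and() on the inverted mask stands in for the kernel's atomic_fetch_andnot():

#include <stdatomic.h>
#include <stdio.h>

#define SSI_DMA_RX_BUSY 0x2
#define SSI_DMA_TX_BUSY 0x1

static atomic_int dma_busy;

/* Each completion callback clears its own bit; the one that sees the
 * other bit already clear is last and finalizes the transfer. */
static void dma_done(int my_bit, int other_bit)
{
        int old = atomic_fetch_and(&dma_busy, ~my_bit);

        if (!(old & other_bit))
                printf("last callback: finalize transfer\n");
}

int main(void)
{
        atomic_store(&dma_busy, SSI_DMA_RX_BUSY | SSI_DMA_TX_BUSY);
        dma_done(SSI_DMA_TX_BUSY, SSI_DMA_RX_BUSY); /* RX still busy */
        dma_done(SSI_DMA_RX_BUSY, SSI_DMA_TX_BUSY); /* finalizes */
        return 0;
}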
+
+static int uniphier_spi_transfer_one_dma(struct spi_master *master,
+                                        struct spi_device *spi,
+                                        struct spi_transfer *t)
+{
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+       int buswidth;
+
+       atomic_set(&priv->dma_busy, 0);
+
+       uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);
+
+       if (priv->bits_per_word <= 8)
+               buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       else if (priv->bits_per_word <= 16)
+               buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+       else
+               buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+       if (priv->rx_buf) {
+               struct dma_slave_config rxconf = {
+                       .direction = DMA_DEV_TO_MEM,
+                       .src_addr = priv->base_dma_addr + SSI_RXDR,
+                       .src_addr_width = buswidth,
+                       .src_maxburst = SSI_FIFO_BURST_NUM,
+               };
+
+               dmaengine_slave_config(master->dma_rx, &rxconf);
+
+               rxdesc = dmaengine_prep_slave_sg(
+                       master->dma_rx,
+                       t->rx_sg.sgl, t->rx_sg.nents,
+                       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               if (!rxdesc)
+                       goto out_err_prep;
+
+               rxdesc->callback = uniphier_spi_dma_rxcb;
+               rxdesc->callback_param = master;
+
+               uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
+               atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+               dmaengine_submit(rxdesc);
+               dma_async_issue_pending(master->dma_rx);
+       }
+
+       if (priv->tx_buf) {
+               struct dma_slave_config txconf = {
+                       .direction = DMA_MEM_TO_DEV,
+                       .dst_addr = priv->base_dma_addr + SSI_TXDR,
+                       .dst_addr_width = buswidth,
+                       .dst_maxburst = SSI_FIFO_BURST_NUM,
+               };
+
+               dmaengine_slave_config(master->dma_tx, &txconf);
+
+               txdesc = dmaengine_prep_slave_sg(
+                       master->dma_tx,
+                       t->tx_sg.sgl, t->tx_sg.nents,
+                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               if (!txdesc)
+                       goto out_err_prep;
+
+               txdesc->callback = uniphier_spi_dma_txcb;
+               txdesc->callback_param = master;
+
+               uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
+               atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+               dmaengine_submit(txdesc);
+               dma_async_issue_pending(master->dma_tx);
+       }
+
+       /* signal that we need to wait for completion */
+       return (priv->tx_buf || priv->rx_buf);
+
+out_err_prep:
+       if (rxdesc)
+               dmaengine_terminate_sync(master->dma_rx);
+
+       return -EINVAL;
+}
+
 static int uniphier_spi_transfer_one_irq(struct spi_master *master,
                                         struct spi_device *spi,
                                         struct spi_transfer *t)
@@ -346,12 +480,12 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master,
 
        uniphier_spi_fill_tx_fifo(priv);
 
-       uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+       uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
 
        time_left = wait_for_completion_timeout(&priv->xfer_done,
                                        msecs_to_jiffies(SSI_TIMEOUT_MS));
 
-       uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+       uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
 
        if (!time_left) {
                dev_err(dev, "transfer timeout.\n");
@@ -395,6 +529,7 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
 {
        struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
        unsigned long threshold;
+       bool use_dma;
 
        /* Terminate and return success for 0 byte length transfer */
        if (!t->len)
@@ -402,6 +537,10 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
 
        uniphier_spi_setup_transfer(spi, t);
 
+       use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
+       if (use_dma)
+               return uniphier_spi_transfer_one_dma(master, spi, t);
+
        /*
         * If the transfer operation will take longer than
         * SSI_POLL_TIMEOUT_US, it should use irq.
@@ -432,6 +571,32 @@ static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
        return 0;
 }
 
+static void uniphier_spi_handle_err(struct spi_master *master,
+                                   struct spi_message *msg)
+{
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       u32 val;
+
+       /* stop running spi transfer */
+       writel(0, priv->base + SSI_CTL);
+
+       /* reset FIFOs */
+       val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+       writel(val, priv->base + SSI_FC);
+
+       uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
+
+       if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
+               dmaengine_terminate_async(master->dma_tx);
+               atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+       }
+
+       if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
+               dmaengine_terminate_async(master->dma_rx);
+               atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+       }
+}
+
 static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
 {
        struct uniphier_spi_priv *priv = dev_id;
@@ -477,6 +642,9 @@ static int uniphier_spi_probe(struct platform_device *pdev)
 {
        struct uniphier_spi_priv *priv;
        struct spi_master *master;
+       struct resource *res;
+       struct dma_slave_caps caps;
+       u32 dma_tx_burst = 0, dma_rx_burst = 0;
        unsigned long clk_rate;
        int irq;
        int ret;
@@ -491,11 +659,13 @@ static int uniphier_spi_probe(struct platform_device *pdev)
        priv->master = master;
        priv->is_save_param = false;
 
-       priv->base = devm_platform_ioremap_resource(pdev, 0);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
                goto out_master_put;
        }
+       priv->base_dma_addr = res->start;
 
        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk)) {
@@ -538,7 +708,45 @@ static int uniphier_spi_probe(struct platform_device *pdev)
                                = uniphier_spi_prepare_transfer_hardware;
        master->unprepare_transfer_hardware
                                = uniphier_spi_unprepare_transfer_hardware;
+       master->handle_err = uniphier_spi_handle_err;
+       master->can_dma = uniphier_spi_can_dma;
+
        master->num_chipselect = 1;
+       master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+
+       master->dma_tx = dma_request_chan(&pdev->dev, "tx");
+       if (IS_ERR_OR_NULL(master->dma_tx)) {
+               if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+                       goto out_disable_clk;
+               master->dma_tx = NULL;
+               dma_tx_burst = INT_MAX;
+       } else {
+               ret = dma_get_slave_caps(master->dma_tx, &caps);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get TX DMA capabilities: %d\n",
+                               ret);
+                       goto out_disable_clk;
+               }
+               dma_tx_burst = caps.max_burst;
+       }
+
+       master->dma_rx = dma_request_chan(&pdev->dev, "rx");
+       if (IS_ERR_OR_NULL(master->dma_rx)) {
+               if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+                       goto out_disable_clk;
+               master->dma_rx = NULL;
+               dma_rx_burst = INT_MAX;
+       } else {
+               ret = dma_get_slave_caps(master->dma_rx, &caps);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get RX DMA capabilities: %d\n",
+                               ret);
+                       goto out_disable_clk;
+               }
+               dma_rx_burst = caps.max_burst;
+       }
+
+       master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
 
        ret = devm_spi_register_master(&pdev->dev, master);
        if (ret)
@@ -558,6 +766,11 @@ static int uniphier_spi_remove(struct platform_device *pdev)
 {
        struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);
 
+       if (priv->master->dma_tx)
+               dma_release_channel(priv->master->dma_tx);
+       if (priv->master->dma_rx)
+               dma_release_channel(priv->master->dma_rx);
+
        clk_disable_unprepare(priv->clk);
 
        return 0;
index 8994545..38b4c78 100644 (file)
@@ -1674,6 +1674,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
                }
        }
 
+       if (unlikely(ctlr->ptp_sts_supported)) {
+               list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+                       WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre);
+                       WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post);
+               }
+       }
+
        spi_unmap_msg(ctlr, mesg);
 
        if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
@@ -2451,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
        int nb, i;
        struct gpio_desc **cs;
        struct device *dev = &ctlr->dev;
+       unsigned long native_cs_mask = 0;
+       unsigned int num_cs_gpios = 0;
 
        nb = gpiod_count(dev, "cs");
        ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
@@ -2492,7 +2501,22 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
                        if (!gpioname)
                                return -ENOMEM;
                        gpiod_set_consumer_name(cs[i], gpioname);
+                       num_cs_gpios++;
+                       continue;
                }
+
+               if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+                       dev_err(dev, "Invalid native chip select %d\n", i);
+                       return -EINVAL;
+               }
+               native_cs_mask |= BIT(i);
+       }
+
+       ctlr->unused_native_cs = ffz(native_cs_mask);
+       if (num_cs_gpios && ctlr->max_native_cs &&
+           ctlr->unused_native_cs >= ctlr->max_native_cs) {
+               dev_err(dev, "No unused native chip select available\n");
+               return -EINVAL;
        }
 
        return 0;
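
The core now derives the spare native chip select itself: GPIO-driven slots are counted, native slots set bits in a mask, and ffz() picks the first free one. A quick check of the arithmetic with native CS 0 and 2 claimed (__builtin_ctzl() on the inverted mask is the userspace stand-in for ffz()):

#include <stdio.h>

int main(void)
{
        unsigned long native_cs_mask = (1UL << 0) | (1UL << 2);
        unsigned int unused = __builtin_ctzl(~native_cs_mask); /* ffz() */

        printf("unused native CS = %u\n", unused);  /* prints 1 */
        return 0;
}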
index 7251a87..b94ed4e 100644 (file)
@@ -4149,9 +4149,6 @@ int iscsit_close_connection(
        iscsit_stop_nopin_response_timer(conn);
        iscsit_stop_nopin_timer(conn);
 
-       if (conn->conn_transport->iscsit_wait_conn)
-               conn->conn_transport->iscsit_wait_conn(conn);
-
        /*
         * During Connection recovery drop unacknowledged out of order
         * commands for this connection, and prepare the other commands
@@ -4237,6 +4234,9 @@ int iscsit_close_connection(
        target_sess_cmd_list_set_waiting(sess->se_sess);
        target_wait_for_sess_cmds(sess->se_sess);
 
+       if (conn->conn_transport->iscsit_wait_conn)
+               conn->conn_transport->iscsit_wait_conn(conn);
+
        ahash_request_free(conn->conn_tx_hash);
        if (conn->conn_rx_hash) {
                struct crypto_ahash *tfm;
index d1ad512..3ca71e3 100644 (file)
@@ -3,6 +3,7 @@
 config OPTEE
        tristate "OP-TEE"
        depends on HAVE_ARM_SMCCC
+       depends on MMU
        help
          This implements the OP-TEE Trusted Execution Environment (TEE)
          driver.
index fd5133e..78ba5f9 100644 (file)
@@ -134,8 +134,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
                _leave(" = -ENAMETOOLONG");
                return ERR_PTR(-ENAMETOOLONG);
        }
-       if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
+
+       /* Prohibit cell names that contain unprintable chars, '/', or '@', or
+        * that begin with a dot.  This also precludes "@cell".
+        */
+       if (name[0] == '.')
                return ERR_PTR(-EINVAL);
+       for (i = 0; i < namelen; i++) {
+               char ch = name[i];
+               if (!isprint(ch) || ch == '/' || ch == '@')
+                       return ERR_PTR(-EINVAL);
+       }
 
        _enter("%*.*s,%s", namelen, namelen, name, addresses);
 
index f639dde..ba4d8f3 100644 (file)
@@ -500,11 +500,8 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
                              &dev_replace->scrub_progress, 0, 1);
 
        ret = btrfs_dev_replace_finishing(fs_info, ret);
-       if (ret == -EINPROGRESS) {
+       if (ret == -EINPROGRESS)
                ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;
-       } else if (ret != -ECANCELED) {
-               WARN_ON(ret);
-       }
 
        return ret;
 
index 21de630..fd266a2 100644 (file)
@@ -3577,17 +3577,27 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                 * This can easily boost the amount of SYSTEM chunks if cleaner
                 * thread can't be triggered fast enough, and use up all space
                 * of btrfs_super_block::sys_chunk_array
+                *
+                * While for dev replace, we need to try our best to mark block
+                * group RO, to prevent race between:
+                * - Write duplication
+                *   Contains latest data
+                * - Scrub copy
+                *   Contains data from commit tree
+                *
+                * If target block group is not marked RO, nocow writes can
+                * be overwritten by scrub copy, causing data corruption.
+                * So for dev-replace, it's not allowed to continue if a block
+                * So for dev-replace, scrub is not allowed to continue if a
+                * block group is not RO.
-               ret = btrfs_inc_block_group_ro(cache, false);
-               scrub_pause_off(fs_info);
-
+               ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
                if (ret == 0) {
                        ro_set = 1;
-               } else if (ret == -ENOSPC) {
+               } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
                        /*
                         * btrfs_inc_block_group_ro return -ENOSPC when it
                         * failed in creating new chunk for metadata.
-                        * It is not a problem for scrub/replace, because
+                        * It is not a problem for scrub, because
                         * metadata are always cowed, and our scrub paused
                         * commit_transactions.
                         */
@@ -3596,9 +3606,22 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                        btrfs_warn(fs_info,
                                   "failed setting block group ro: %d", ret);
                        btrfs_put_block_group(cache);
+                       scrub_pause_off(fs_info);
                        break;
                }
 
+               /*
+                * Now the target block is marked RO, wait for nocow writes to
+                * finish before dev-replace.
+                * COW is fine, as COW never overwrites extents in commit tree.
+                */
+               if (sctx->is_dev_replace) {
+                       btrfs_wait_nocow_writers(cache);
+                       btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
+                                       cache->length);
+               }
+
+               scrub_pause_off(fs_info);
                down_write(&dev_replace->rwsem);
                dev_replace->cursor_right = found_key.offset + length;
                dev_replace->cursor_left = found_key.offset;
index 374db1b..145d46b 100644 (file)
@@ -708,8 +708,10 @@ void ceph_mdsc_release_request(struct kref *kref)
                /* avoid calling iput_final() in mds dispatch threads */
                ceph_async_iput(req->r_inode);
        }
-       if (req->r_parent)
+       if (req->r_parent) {
                ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+               ceph_async_iput(req->r_parent);
+       }
        ceph_async_iput(req->r_target_inode);
        if (req->r_dentry)
                dput(req->r_dentry);
@@ -2676,8 +2678,10 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
        /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
        if (req->r_inode)
                ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
-       if (req->r_parent)
+       if (req->r_parent) {
                ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+               ihold(req->r_parent);
+       }
        if (req->r_old_dentry_dir)
                ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
                                  CEPH_CAP_PIN);
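
The two hunks are a matched pair: the inode reference taken at submit time is dropped at release time, and the final iput() is deferred through ceph_async_iput() so it never runs in an MDS dispatch thread. The pairing, condensed:

    /* submit: pin the parent inode for the request's lifetime */
    if (req->r_parent) {
            ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
            ihold(req->r_parent);
    }

    /* release: drop both references, with the iput deferred off this thread */
    if (req->r_parent) {
            ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
            ceph_async_iput(req->r_parent);
    }
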
index 187dd94..e54556b 100644 (file)
@@ -4463,13 +4463,15 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
                return -EINVAL;
        if (copy_from_user(&up, arg, sizeof(up)))
                return -EFAULT;
+       if (up.resv)
+               return -EINVAL;
        if (check_add_overflow(up.offset, nr_args, &done))
                return -EOVERFLOW;
        if (done > ctx->nr_user_files)
                return -EINVAL;
 
        done = 0;
-       fds = (__s32 __user *) up.fds;
+       fds = u64_to_user_ptr(up.fds);
        while (nr_args) {
                struct fixed_file_table *table;
                unsigned index;
@@ -5042,10 +5044,6 @@ static int io_uring_flush(struct file *file, void *data)
        struct io_ring_ctx *ctx = file->private_data;
 
        io_uring_cancel_files(ctx, data);
-       if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
-               io_cqring_overflow_flush(ctx, true);
-               io_wq_cancel_all(ctx->io_wq);
-       }
        return 0;
 }
 
@@ -5159,12 +5157,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
        } else if (to_submit) {
                struct mm_struct *cur_mm;
 
-               if (current->mm != ctx->sqo_mm ||
-                   current_cred() != ctx->creds) {
-                       ret = -EPERM;
-                       goto out;
-               }
-
                to_submit = min(to_submit, ctx->sq_entries);
                mutex_lock(&ctx->uring_lock);
                /* already have mm, so io_submit_sqes() won't try to grab it */
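
u64_to_user_ptr() is the right way to turn the u64 fds field into a user pointer: a plain cast can warn or misbehave where pointers are narrower than 64 bits. Roughly, the kernel macro (a sketch of the include/linux/kernel.h definition) is:

    #define u64_to_user_ptr(x) (            \
    {                                       \
            typecheck(u64, (x));            \
            (void __user *)(uintptr_t)(x);  \
    }                                       \
    )

Rejecting a non-zero resv field up front keeps that reserved space usable for future extensions without breaking old binaries.
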
index d2720dc..4fb61e0 100644 (file)
@@ -1001,7 +1001,8 @@ static int may_linkat(struct path *link)
  * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
  *                       should be allowed, or not, on files that already
  *                       exist.
- * @dir: the sticky parent directory
+ * @dir_mode: mode bits of directory
+ * @dir_uid: owner of directory
  * @inode: the inode of the file to open
  *
  * Block an O_CREAT open of a FIFO (or a regular file) when:
@@ -1017,18 +1018,18 @@ static int may_linkat(struct path *link)
  *
  * Returns 0 if the open is allowed, -ve on error.
  */
-static int may_create_in_sticky(struct dentry * const dir,
+static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
                                struct inode * const inode)
 {
        if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
            (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
-           likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
-           uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
+           likely(!(dir_mode & S_ISVTX)) ||
+           uid_eq(inode->i_uid, dir_uid) ||
            uid_eq(current_fsuid(), inode->i_uid))
                return 0;
 
-       if (likely(dir->d_inode->i_mode & 0002) ||
-           (dir->d_inode->i_mode & 0020 &&
+       if (likely(dir_mode & 0002) ||
+           (dir_mode & 0020 &&
             ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
              (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
                const char *operation = S_ISFIFO(inode->i_mode) ?
@@ -3201,6 +3202,8 @@ static int do_last(struct nameidata *nd,
                   struct file *file, const struct open_flags *op)
 {
        struct dentry *dir = nd->path.dentry;
+       kuid_t dir_uid = dir->d_inode->i_uid;
+       umode_t dir_mode = dir->d_inode->i_mode;
        int open_flag = op->open_flag;
        bool will_truncate = (open_flag & O_TRUNC) != 0;
        bool got_write = false;
@@ -3331,7 +3334,7 @@ finish_open:
                error = -EISDIR;
                if (d_is_dir(nd->path.dentry))
                        goto out;
-               error = may_create_in_sticky(dir,
+               error = may_create_in_sticky(dir_mode, dir_uid,
                                             d_backing_inode(nd->path.dentry));
                if (unlikely(error))
                        goto out;
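
The point of the refactor is that may_create_in_sticky() no longer touches the parent dentry at all: do_last() samples i_mode and i_uid while nd->path.dentry is known to be stable, then passes plain values that cannot go stale. The pattern, condensed from the hunks above:

    kuid_t dir_uid = dir->d_inode->i_uid;           /* snapshot early ... */
    umode_t dir_mode = dir->d_inode->i_mode;

    /* ... lookup and open happen here ... */

    error = may_create_in_sticky(dir_mode, dir_uid, /* ... use values later */
                                 d_backing_inode(nd->path.dentry));
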
index d26d5ea..de2ecef 100644 (file)
@@ -102,10 +102,14 @@ EXPORT_SYMBOL(iterate_dir);
  * filename length, and the above "soft error" worry means
  * that it's probably better left alone until we have that
  * issue clarified.
+ *
+ * Note the PATH_MAX check - it's arbitrary, but PATH_MAX is the real
+ * kernel limit on a possible path component, not NAME_MAX, which is
+ * only the technical standard limit.
  */
 static int verify_dirent_name(const char *name, int len)
 {
-       if (!len)
+       if (len <= 0 || len >= PATH_MAX)
                return -EIO;
        if (memchr(name, '/', len))
                return -EIO;
@@ -206,7 +210,7 @@ struct linux_dirent {
 struct getdents_callback {
        struct dir_context ctx;
        struct linux_dirent __user * current_dir;
-       struct linux_dirent __user * previous;
+       int prev_reclen;
        int count;
        int error;
 };
@@ -214,12 +218,13 @@ struct getdents_callback {
 static int filldir(struct dir_context *ctx, const char *name, int namlen,
                   loff_t offset, u64 ino, unsigned int d_type)
 {
-       struct linux_dirent __user * dirent;
+       struct linux_dirent __user *dirent, *prev;
        struct getdents_callback *buf =
                container_of(ctx, struct getdents_callback, ctx);
        unsigned long d_ino;
        int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
                sizeof(long));
+       int prev_reclen;
 
        buf->error = verify_dirent_name(name, namlen);
        if (unlikely(buf->error))
@@ -232,28 +237,24 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
                buf->error = -EOVERFLOW;
                return -EOVERFLOW;
        }
-       dirent = buf->previous;
-       if (dirent && signal_pending(current))
+       prev_reclen = buf->prev_reclen;
+       if (prev_reclen && signal_pending(current))
                return -EINTR;
-
-       /*
-        * Note! This range-checks 'previous' (which may be NULL).
-        * The real range was checked in getdents
-        */
-       if (!user_access_begin(dirent, sizeof(*dirent)))
-               goto efault;
-       if (dirent)
-               unsafe_put_user(offset, &dirent->d_off, efault_end);
        dirent = buf->current_dir;
+       prev = (void __user *) dirent - prev_reclen;
+       if (!user_access_begin(prev, reclen + prev_reclen))
+               goto efault;
+
+       /* This might be 'dirent->d_off', but if so it will get overwritten */
+       unsafe_put_user(offset, &prev->d_off, efault_end);
        unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
        unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
        unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
        unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
        user_access_end();
 
-       buf->previous = dirent;
-       dirent = (void __user *)dirent + reclen;
-       buf->current_dir = dirent;
+       buf->current_dir = (void __user *)dirent + reclen;
+       buf->prev_reclen = reclen;
        buf->count -= reclen;
        return 0;
 efault_end:
@@ -267,7 +268,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
                struct linux_dirent __user *, dirent, unsigned int, count)
 {
        struct fd f;
-       struct linux_dirent __user * lastdirent;
        struct getdents_callback buf = {
                .ctx.actor = filldir,
                .count = count,
@@ -285,8 +285,10 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
        error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
-       lastdirent = buf.previous;
-       if (lastdirent) {
+       if (buf.prev_reclen) {
+               struct linux_dirent __user * lastdirent;
+               lastdirent = (void __user *)buf.current_dir - buf.prev_reclen;
+
                if (put_user(buf.ctx.pos, &lastdirent->d_off))
                        error = -EFAULT;
                else
@@ -299,7 +301,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
 struct getdents_callback64 {
        struct dir_context ctx;
        struct linux_dirent64 __user * current_dir;
-       struct linux_dirent64 __user * previous;
+       int prev_reclen;
        int count;
        int error;
 };
@@ -307,11 +309,12 @@ struct getdents_callback64 {
 static int filldir64(struct dir_context *ctx, const char *name, int namlen,
                     loff_t offset, u64 ino, unsigned int d_type)
 {
-       struct linux_dirent64 __user *dirent;
+       struct linux_dirent64 __user *dirent, *prev;
        struct getdents_callback64 *buf =
                container_of(ctx, struct getdents_callback64, ctx);
        int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
                sizeof(u64));
+       int prev_reclen;
 
        buf->error = verify_dirent_name(name, namlen);
        if (unlikely(buf->error))
@@ -319,30 +322,27 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
        buf->error = -EINVAL;   /* only used if we fail.. */
        if (reclen > buf->count)
                return -EINVAL;
-       dirent = buf->previous;
-       if (dirent && signal_pending(current))
+       prev_reclen = buf->prev_reclen;
+       if (prev_reclen && signal_pending(current))
                return -EINTR;
-
-       /*
-        * Note! This range-checks 'previous' (which may be NULL).
-        * The real range was checked in getdents
-        */
-       if (!user_access_begin(dirent, sizeof(*dirent)))
-               goto efault;
-       if (dirent)
-               unsafe_put_user(offset, &dirent->d_off, efault_end);
        dirent = buf->current_dir;
+       prev = (void __user *)dirent - prev_reclen;
+       if (!user_access_begin(prev, reclen + prev_reclen))
+               goto efault;
+
+       /* This might be 'dirent->d_off', but if so it will get overwritten */
+       unsafe_put_user(offset, &prev->d_off, efault_end);
        unsafe_put_user(ino, &dirent->d_ino, efault_end);
        unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
        unsafe_put_user(d_type, &dirent->d_type, efault_end);
        unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
        user_access_end();
 
-       buf->previous = dirent;
-       dirent = (void __user *)dirent + reclen;
-       buf->current_dir = dirent;
+       buf->prev_reclen = reclen;
+       buf->current_dir = (void __user *)dirent + reclen;
        buf->count -= reclen;
        return 0;
+
 efault_end:
        user_access_end();
 efault:
@@ -354,7 +354,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
                    unsigned int count)
 {
        struct fd f;
-       struct linux_dirent64 __user * lastdirent;
        struct getdents_callback64 buf = {
                .ctx.actor = filldir64,
                .count = count,
@@ -372,9 +371,11 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
        error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
-       lastdirent = buf.previous;
-       if (lastdirent) {
+       if (buf.prev_reclen) {
+               struct linux_dirent64 __user * lastdirent;
                typeof(lastdirent->d_off) d_off = buf.ctx.pos;
+
+               lastdirent = (void __user *) buf.current_dir - buf.prev_reclen;
                if (__put_user(d_off, &lastdirent->d_off))
                        error = -EFAULT;
                else
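
Both filldir() variants now carry the same invariant: buf->prev_reclen is the length of the last record written, so the previous record's address can always be recomputed from buf->current_dir, and a single user_access_begin() window covers both the previous record's d_off fixup and the entire new record. Condensed from the hunks:

    prev = (void __user *)dirent - prev_reclen;
    if (!user_access_begin(prev, reclen + prev_reclen))
            goto efault;
    /* prev_reclen == 0 on the first record, so prev == dirent and the
     * d_off store below simply pre-writes the new record's own field */
    unsafe_put_user(offset, &prev->d_off, efault_end);
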
index 62b40df..28b241c 100644 (file)
@@ -319,8 +319,12 @@ static int reiserfs_for_each_xattr(struct inode *inode,
 out_dir:
        dput(dir);
 out:
-       /* -ENODATA isn't an error */
-       if (err == -ENODATA)
+       /*
+        * -ENODATA: this object doesn't have any xattrs
+        * -EOPNOTSUPP: this file system doesn't have xattrs enabled on disk.
+        * Neither is an error.
+        */
+       if (err == -ENODATA || err == -EOPNOTSUPP)
                err = 0;
        return err;
 }
diff --git a/include/dt-bindings/dma/x1830-dma.h b/include/dt-bindings/dma/x1830-dma.h
new file mode 100644 (file)
index 0000000..35bcb89
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for X1830 DMA bindings.
+ *
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_X1830_DMA_H__
+#define __DT_BINDINGS_DMA_X1830_DMA_H__
+
+/*
+ * Request type numbers for the X1830 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define X1830_DMA_I2S0_TX      0x6
+#define X1830_DMA_I2S0_RX      0x7
+#define X1830_DMA_AUTO         0x8
+#define X1830_DMA_SADC_RX      0x9
+#define X1830_DMA_UART1_TX     0x12
+#define X1830_DMA_UART1_RX     0x13
+#define X1830_DMA_UART0_TX     0x14
+#define X1830_DMA_UART0_RX     0x15
+#define X1830_DMA_SSI0_TX      0x16
+#define X1830_DMA_SSI0_RX      0x17
+#define X1830_DMA_SSI1_TX      0x18
+#define X1830_DMA_SSI1_RX      0x19
+#define X1830_DMA_MSC0_TX      0x1a
+#define X1830_DMA_MSC0_RX      0x1b
+#define X1830_DMA_MSC1_TX      0x1c
+#define X1830_DMA_MSC1_RX      0x1d
+#define X1830_DMA_DMIC_RX      0x21
+#define X1830_DMA_SMB0_TX      0x24
+#define X1830_DMA_SMB0_RX      0x25
+#define X1830_DMA_SMB1_TX      0x26
+#define X1830_DMA_SMB1_RX      0x27
+#define X1830_DMA_DES_TX       0x2e
+#define X1830_DMA_DES_RX       0x2f
+
+#endif /* __DT_BINDINGS_DMA_X1830_DMA_H__ */
diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h
new file mode 100644 (file)
index 0000000..61d5cc0
--- /dev/null
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_H_
+#define K3_PSIL_H_
+
+#include <linux/types.h>
+
+#define K3_PSIL_DST_THREAD_ID_OFFSET 0x8000
+
+struct device;
+
+/**
+ * enum udma_tp_level - Channel Throughput Levels
+ * @UDMA_TP_NORMAL:    Normal channel
+ * @UDMA_TP_HIGH:      High Throughput channel
+ * @UDMA_TP_ULTRAHIGH: Ultra High Throughput channel
+ */
+enum udma_tp_level {
+       UDMA_TP_NORMAL = 0,
+       UDMA_TP_HIGH,
+       UDMA_TP_ULTRAHIGH,
+       UDMA_TP_LAST,
+};
+
+/**
+ * enum psil_endpoint_type - PSI-L Endpoint type
+ * @PSIL_EP_NATIVE:    Normal channel
+ * @PSIL_EP_PDMA_XY:   XY mode PDMA
+ * @PSIL_EP_PDMA_MCAN: MCAN mode PDMA
+ * @PSIL_EP_PDMA_AASRC: AASRC mode PDMA
+ */
+enum psil_endpoint_type {
+       PSIL_EP_NATIVE = 0,
+       PSIL_EP_PDMA_XY,
+       PSIL_EP_PDMA_MCAN,
+       PSIL_EP_PDMA_AASRC,
+};
+
+/**
+ * struct psil_endpoint_config - PSI-L Endpoint configuration
+ * @ep_type:           PSI-L endpoint type
+ * @pkt_mode:          If set, the channel must be in Packet mode, otherwise in
+ *                     TR mode
+ * @notdpkt:           TDCM must be suppressed on the TX channel
+ * @needs_epib:                Endpoint needs EPIB
+ * @psd_size:          If non-zero, PSdata of this size is used by the endpoint
+ * @channel_tpl:       Desired throughput level for the channel
+ * @pdma_acc32:                ACC32 must be enabled on the PDMA side
+ * @pdma_burst:                BURST must be enabled on the PDMA side
+ */
+struct psil_endpoint_config {
+       enum psil_endpoint_type ep_type;
+
+       unsigned pkt_mode:1;
+       unsigned notdpkt:1;
+       unsigned needs_epib:1;
+       u32 psd_size;
+       enum udma_tp_level channel_tpl;
+
+       /* PDMA properties, valid for PSIL_EP_PDMA_* */
+       unsigned pdma_acc32:1;
+       unsigned pdma_burst:1;
+};
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+                          struct psil_endpoint_config *ep_config);
+
+#endif /* K3_PSIL_H_ */
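
A hypothetical caller, sketched against the declarations above; the thread name "my-thread" and the field values are illustrative only:

    static int example_override_ep(struct device *dev)
    {
            struct psil_endpoint_config cfg = {
                    .ep_type     = PSIL_EP_PDMA_XY,
                    .pkt_mode    = 1,
                    .needs_epib  = 1,
                    .psd_size    = 16,
                    .channel_tpl = UDMA_TP_NORMAL,
            };

            /* override the stored endpoint configuration for this thread */
            return psil_set_new_ep_config(dev, "my-thread", &cfg);
    }
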
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
new file mode 100644 (file)
index 0000000..caadbab
--- /dev/null
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_GLUE_H_
+#define K3_UDMA_GLUE_H_
+
+#include <linux/types.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+
+struct k3_udma_glue_tx_channel_cfg {
+       struct k3_ring_cfg tx_cfg;
+       struct k3_ring_cfg txcq_cfg;
+
+       bool tx_pause_on_err;
+       bool tx_filt_einfo;
+       bool tx_filt_pswords;
+       bool tx_supr_tdpkt;
+       u32  swdata_size;
+};
+
+struct k3_udma_glue_tx_channel;
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+               const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                            struct cppi5_host_desc_t *desc_tx,
+                            dma_addr_t desc_dma);
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                           dma_addr_t *desc_dma);
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                              bool sync);
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+               void *data, void (*cleanup)(void *data, dma_addr_t desc_dma));
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+
+enum {
+       K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
+       K3_UDMA_GLUE_SRC_TAG_LO_USE_FLOW_REG = 1,
+       K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_FLOW_ID = 2,
+       K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG = 4,
+};
+
+/**
+ * struct k3_udma_glue_rx_flow_cfg - UDMA RX flow cfg
+ *
+ * @rx_cfg:            RX ring configuration
+ * @rxfdq_cfg:         RX free Host PD ring configuration
+ * @ring_rxq_id:       RX ring id (or -1 for any)
+ * @ring_rxfdq0_id:    RX free Host PD ring (FDQ) id (or -1 for any)
+ * @rx_error_handling: Rx Error Handling Mode (0 - drop, 1 - re-try)
+ * @src_tag_lo_sel:    Rx Source Tag Low Byte Selector in Host PD
+ */
+struct k3_udma_glue_rx_flow_cfg {
+       struct k3_ring_cfg rx_cfg;
+       struct k3_ring_cfg rxfdq_cfg;
+       int ring_rxq_id;
+       int ring_rxfdq0_id;
+       bool rx_error_handling;
+       int src_tag_lo_sel;
+};
+
+/**
+ * struct k3_udma_glue_rx_channel_cfg - UDMA RX channel cfg
+ *
+ * @swdata_size:       SW Data is present in Host PD of @swdata_size bytes
+ * @flow_id_base:      first flow_id used by channel.
+ *                     if @flow_id_base = -1, a range of GP rflows will be
+ *                     allocated dynamically.
+ * @flow_id_num:       number of RX flows used by channel
+ * @flow_id_use_rxchan_id:     use RX channel id as flow id,
+ *                             used only if @flow_id_num = 1
+ * @remote:            indication that the RX channel is remote - some remote
+ *                     CPU core owns and controls the RX channel. The Linux
+ *                     host is only allowed to attach and configure RX flows
+ *                     within the RX channel. If set, no RX channel operations
+ *                     will be performed by the K3 NAVSS DMA glue interface.
+ * @def_flow_cfg:      default RX flow configuration,
+ *                     used only if @flow_id_num = 1
+ */
+struct k3_udma_glue_rx_channel_cfg {
+       u32  swdata_size;
+       int  flow_id_base;
+       int  flow_id_num;
+       bool flow_id_use_rxchan_id;
+       bool remote;
+
+       struct k3_udma_glue_rx_flow_cfg *def_flow_cfg;
+};
+
+struct k3_udma_glue_rx_channel;
+
+struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
+               struct device *dev,
+               const char *name,
+               struct k3_udma_glue_rx_channel_cfg *cfg);
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+                              bool sync);
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+               u32 flow_num, struct cppi5_host_desc_t *desc_tx,
+               dma_addr_t desc_dma);
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+               u32 flow_num, dma_addr_t *desc_dma);
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+               u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg);
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+                                   u32 flow_idx);
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+                           u32 flow_num);
+void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
+                            u32 flow_num);
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+               u32 flow_num, void *data,
+               void (*cleanup)(void *data, dma_addr_t desc_dma),
+               bool skip_fdq);
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+                               u32 flow_idx);
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+                                u32 flow_idx);
+
+#endif /* K3_UDMA_GLUE_H_ */
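
A sketch of the TX-side call sequence the API above implies, assuming ERR_PTR-style returns from the request function; the channel name "tx0" and the zero-initialized ring configs are placeholders:

    static int example_tx_setup(struct device *dev)
    {
            struct k3_udma_glue_tx_channel_cfg cfg = {
                    .swdata_size = 16,      /* ring configs left at defaults */
            };
            struct k3_udma_glue_tx_channel *tx;
            int ret;

            tx = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
            if (IS_ERR(tx))
                    return PTR_ERR(tx);

            ret = k3_udma_glue_enable_tx_chn(tx);
            if (ret) {
                    k3_udma_glue_release_tx_chn(tx);
                    return ret;
            }

            /* descriptors then go out via k3_udma_glue_push_tx_chn() and
             * come back via k3_udma_glue_pop_tx_chn() */
            return 0;
    }
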
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h
new file mode 100644 (file)
index 0000000..579356a
--- /dev/null
@@ -0,0 +1,1059 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPPI5 descriptors interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __TI_CPPI5_H__
+#define __TI_CPPI5_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+/**
+ * struct cppi5_desc_hdr_t - Descriptor header, present in all types of
+ *                          descriptors
+ * @pkt_info0:         Packet info word 0 (n/a in Buffer desc)
+ * @pkt_info1:         Packet info word 1 (n/a in Buffer desc)
+ * @pkt_info2:         Packet info word 2 (n/a in Buffer desc)
+ * @src_dst_tag:       Packet info word 3 (n/a in Buffer desc)
+ */
+struct cppi5_desc_hdr_t {
+       u32 pkt_info0;
+       u32 pkt_info1;
+       u32 pkt_info2;
+       u32 src_dst_tag;
+} __packed;
+
+/**
+ * struct cppi5_host_desc_t - Host-mode packet and buffer descriptor definition
+ * @hdr:               Descriptor header
+ * @next_desc:         word 4/5: Linking word
+ * @buf_ptr:           word 6/7: Buffer pointer
+ * @buf_info1:         word 8: Buffer valid data length
+ * @org_buf_len:       word 9: Original buffer length
+ * @org_buf_ptr:       word 10/11: Original buffer pointer
+ * @epib:              Extended Packet Info Data (optional, 4 words), and/or
+ *                     Protocol Specific Data (optional, 0-128 bytes in
+ *                     multiples of 4), and/or
+ *                     Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_host_desc_t {
+       struct cppi5_desc_hdr_t hdr;
+       u64 next_desc;
+       u64 buf_ptr;
+       u32 buf_info1;
+       u32 org_buf_len;
+       u64 org_buf_ptr;
+       u32 epib[0];
+} __packed;
+
+#define CPPI5_DESC_MIN_ALIGN                   (16U)
+
+#define CPPI5_INFO0_HDESC_EPIB_SIZE            (16U)
+#define CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE      (128U)
+
+#define CPPI5_INFO0_HDESC_TYPE_SHIFT           (30U)
+#define CPPI5_INFO0_HDESC_TYPE_MASK            GENMASK(31, 30)
+#define   CPPI5_INFO0_DESC_TYPE_VAL_HOST       (1U)
+#define   CPPI5_INFO0_DESC_TYPE_VAL_MONO       (2U)
+#define   CPPI5_INFO0_DESC_TYPE_VAL_TR         (3U)
+#define CPPI5_INFO0_HDESC_EPIB_PRESENT         BIT(29)
+/*
+ * Protocol Specific Words location:
+ * 0 = located in the descriptor,
+ * 1 = located in the SOP Buffer immediately prior to the data.
+ */
+#define CPPI5_INFO0_HDESC_PSINFO_LOCATION      BIT(28)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT    (22U)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK     GENMASK(27, 22)
+#define CPPI5_INFO0_HDESC_PKTLEN_SHIFT         (0)
+#define CPPI5_INFO0_HDESC_PKTLEN_MASK          GENMASK(21, 0)
+
+#define CPPI5_INFO1_DESC_PKTERROR_SHIFT                (28U)
+#define CPPI5_INFO1_DESC_PKTERROR_MASK         GENMASK(31, 28)
+#define CPPI5_INFO1_HDESC_PSFLGS_SHIFT         (24U)
+#define CPPI5_INFO1_HDESC_PSFLGS_MASK          GENMASK(27, 24)
+#define CPPI5_INFO1_DESC_PKTID_SHIFT           (14U)
+#define CPPI5_INFO1_DESC_PKTID_MASK            GENMASK(23, 14)
+#define CPPI5_INFO1_DESC_FLOWID_SHIFT          (0)
+#define CPPI5_INFO1_DESC_FLOWID_MASK           GENMASK(13, 0)
+#define CPPI5_INFO1_DESC_FLOWID_DEFAULT                CPPI5_INFO1_DESC_FLOWID_MASK
+
+#define CPPI5_INFO2_HDESC_PKTTYPE_SHIFT                (27U)
+#define CPPI5_INFO2_HDESC_PKTTYPE_MASK         GENMASK(31, 27)
+/* Return Policy: 0 = Entire packet, 1 = Each buffer */
+#define CPPI5_INFO2_HDESC_RETPOLICY            BIT(18)
+/*
+ * Early Return:
+ * 0 = desc pointers should be returned after all reads have been completed
+ * 1 = desc pointers should be returned immediately upon fetching
+ * the descriptor and beginning to transfer data.
+ */
+#define CPPI5_INFO2_HDESC_EARLYRET             BIT(17)
+/*
+ * Return Push Policy:
+ * 0 = Descriptor must be returned to tail of queue
+ * 1 = Descriptor must be returned to head of queue
+ */
+#define CPPI5_INFO2_DESC_RETPUSHPOLICY         BIT(16)
+#define CPPI5_INFO2_DESC_RETP_MASK             GENMASK(18, 16)
+
+#define CPPI5_INFO2_DESC_RETQ_SHIFT            (0)
+#define CPPI5_INFO2_DESC_RETQ_MASK             GENMASK(15, 0)
+
+#define CPPI5_INFO3_DESC_SRCTAG_SHIFT          (16U)
+#define CPPI5_INFO3_DESC_SRCTAG_MASK           GENMASK(31, 16)
+#define CPPI5_INFO3_DESC_DSTTAG_SHIFT          (0)
+#define CPPI5_INFO3_DESC_DSTTAG_MASK           GENMASK(15, 0)
+
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_SHIFT    (0)
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK     GENMASK(27, 0)
+
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_SHIFT    (0)
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK     GENMASK(27, 0)
+
+/**
+ * struct cppi5_desc_epib_t - Host Packet Descriptor Extended Packet Info Block
+ * @timestamp:         word 0: application specific timestamp
+ * @sw_info0:          word 1: Software Info 0
+ * @sw_info1:          word 2: Software Info 1
+ * @sw_info2:          word 3: Software Info 2
+ */
+struct cppi5_desc_epib_t {
+       u32 timestamp;  /* w0: application specific timestamp */
+       u32 sw_info0;   /* w1: Software Info 0 */
+       u32 sw_info1;   /* w2: Software Info 1 */
+       u32 sw_info2;   /* w3: Software Info 2 */
+};
+
+/**
+ * struct cppi5_monolithic_desc_t - Monolithic-mode packet descriptor
+ * @hdr:               Descriptor header
+ * @epib:              Extended Packet Info Data (optional, 4 words), and/or
+ *                     Protocol Specific Data (optional, 0-128 bytes in
+ *                     multiples of 4), and/or
+ *                     Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_monolithic_desc_t {
+       struct cppi5_desc_hdr_t hdr;
+       u32 epib[0];
+};
+
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT    (18U)
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_MASK     GENMASK(26, 18)
+
+/*
+ * Reload Count:
+ * 0 = Finish the packet and place the descriptor back on the return queue
+ * 1-0x1ff = Vector to the Reload Index and resume processing
+ * 0x1ff indicates perpetual loop, infinite reload until the channel is stopped
+ */
+#define CPPI5_INFO0_TRDESC_RLDCNT_SHIFT                (20U)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MASK         GENMASK(28, 20)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MAX          (0x1ff)
+#define CPPI5_INFO0_TRDESC_RLDCNT_INFINITE     CPPI5_INFO0_TRDESC_RLDCNT_MAX
+#define CPPI5_INFO0_TRDESC_RLDIDX_SHIFT                (14U)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MASK         GENMASK(19, 14)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MAX          (0x3f)
+#define CPPI5_INFO0_TRDESC_LASTIDX_SHIFT       (0)
+#define CPPI5_INFO0_TRDESC_LASTIDX_MASK                GENMASK(13, 0)
+
+#define CPPI5_INFO1_TRDESC_RECSIZE_SHIFT       (24U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_MASK                GENMASK(26, 24)
+#define   CPPI5_INFO1_TRDESC_RECSIZE_VAL_16B   (0)
+#define   CPPI5_INFO1_TRDESC_RECSIZE_VAL_32B   (1U)
+#define   CPPI5_INFO1_TRDESC_RECSIZE_VAL_64B   (2U)
+#define   CPPI5_INFO1_TRDESC_RECSIZE_VAL_128B  (3U)
+
+static inline void cppi5_desc_dump(void *desc, u32 size)
+{
+       print_hex_dump(KERN_ERR, "dump udmap_desc: ", DUMP_PREFIX_NONE,
+                      32, 4, desc, size, false);
+}
+
+#define CPPI5_TDCM_MARKER                      (0x1)
+/**
+ * cppi5_desc_is_tdcm - check if the paddr indicates Teardown Complete Message
+ * @paddr: Physical address of the packet popped from the ring
+ *
+ * Returns true if the address indicates TDCM
+ */
+static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr)
+{
+       return (paddr & CPPI5_TDCM_MARKER) ? true : false;
+}
+
+/**
+ * cppi5_desc_get_type - get descriptor type
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns descriptor type:
+ * CPPI5_INFO0_DESC_TYPE_VAL_HOST
+ * CPPI5_INFO0_DESC_TYPE_VAL_MONO
+ * CPPI5_INFO0_DESC_TYPE_VAL_TR
+ */
+static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr)
+{
+       return (desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_TYPE_MASK) >>
+               CPPI5_INFO0_HDESC_TYPE_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_errflags - get Error Flags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ *
+ * Returns Error Flags from Packet/TR Descriptor
+ */
+static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr)
+{
+       return (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTERROR_MASK) >>
+               CPPI5_INFO1_DESC_PKTERROR_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_pktids - get Packet and Flow ids from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ *
+ * Returns Packet and Flow ids from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+                                        u32 *pkt_id, u32 *flow_id)
+{
+       *pkt_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTID_MASK) >>
+                  CPPI5_INFO1_DESC_PKTID_SHIFT;
+       *flow_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_FLOWID_MASK) >>
+                   CPPI5_INFO1_DESC_FLOWID_SHIFT;
+}
+
+/**
+ * cppi5_desc_set_pktids - set Packet and Flow ids in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ */
+static inline void cppi5_desc_set_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+                                        u32 pkt_id, u32 flow_id)
+{
+       desc_hdr->pkt_info1 &= ~(CPPI5_INFO1_DESC_PKTID_MASK |
+                                CPPI5_INFO1_DESC_FLOWID_MASK);
+       desc_hdr->pkt_info1 |= (pkt_id << CPPI5_INFO1_DESC_PKTID_SHIFT) &
+                               CPPI5_INFO1_DESC_PKTID_MASK;
+       desc_hdr->pkt_info1 |= (flow_id << CPPI5_INFO1_DESC_FLOWID_SHIFT) &
+                               CPPI5_INFO1_DESC_FLOWID_MASK;
+}
+
+/**
+ * cppi5_desc_set_retpolicy - set Packet Return Policy in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @flags: flags, supported values
+ *  CPPI5_INFO2_HDESC_RETPOLICY
+ *  CPPI5_INFO2_HDESC_EARLYRET
+ *  CPPI5_INFO2_DESC_RETPUSHPOLICY
+ * @return_ring_id: Packet Return Queue/Ring id, value 0xFFFF reserved
+ */
+static inline void cppi5_desc_set_retpolicy(struct cppi5_desc_hdr_t *desc_hdr,
+                                           u32 flags, u32 return_ring_id)
+{
+       desc_hdr->pkt_info2 &= ~(CPPI5_INFO2_DESC_RETP_MASK |
+                                CPPI5_INFO2_DESC_RETQ_MASK);
+       desc_hdr->pkt_info2 |= flags & CPPI5_INFO2_DESC_RETP_MASK;
+       desc_hdr->pkt_info2 |= return_ring_id & CPPI5_INFO2_DESC_RETQ_MASK;
+}
+
+/**
+ * cppi5_desc_get_tags_ids - get Packet Src/Dst Tags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Returns Packet Src/Dst Tags from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+                                          u32 *src_tag_id, u32 *dst_tag_id)
+{
+       if (src_tag_id)
+               *src_tag_id = (desc_hdr->src_dst_tag &
+                             CPPI5_INFO3_DESC_SRCTAG_MASK) >>
+                             CPPI5_INFO3_DESC_SRCTAG_SHIFT;
+       if (dst_tag_id)
+               *dst_tag_id = desc_hdr->src_dst_tag &
+                             CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_desc_set_tags_ids - set Packet Src/Dst Tags in HDesc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Sets Packet Src/Dst Tags in the packet/TR descriptor header
+ */
+static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+                                          u32 src_tag_id, u32 dst_tag_id)
+{
+       desc_hdr->src_dst_tag = (src_tag_id << CPPI5_INFO3_DESC_SRCTAG_SHIFT) &
+                               CPPI5_INFO3_DESC_SRCTAG_MASK;
+       desc_hdr->src_dst_tag |= dst_tag_id & CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_hdesc_calc_size - Calculate Host Packet Descriptor size
+ * @epib: is EPIB present
+ * @psdata_size: PSDATA size
+ * @sw_data_size: SWDATA size
+ *
+ * Returns required Host Packet Descriptor size
+ * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE
+ */
+static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size,
+                                       u32 sw_data_size)
+{
+       u32 desc_size;
+
+       if (psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE)
+               return 0;
+
+       desc_size = sizeof(struct cppi5_host_desc_t) + psdata_size +
+                   sw_data_size;
+
+       if (epib)
+               desc_size += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+       return ALIGN(desc_size, CPPI5_DESC_MIN_ALIGN);
+}
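
Taken together with cppi5_hdesc_init() below, the intended sizing/init pairing looks roughly like this (the allocator is a placeholder; EPIB present, 16 bytes of PSDATA, 8 bytes of SWDATA):

    u32 size = cppi5_hdesc_calc_size(true, 16, 8);
    struct cppi5_host_desc_t *desc;

    if (!size)      /* PSDATA exceeded CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE */
            return -EINVAL;
    desc = my_dma_pool_alloc(size);         /* hypothetical allocator */
    cppi5_hdesc_init(desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, 16);
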
+
+/**
+ * cppi5_hdesc_init - Init Host Packet Descriptor
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ *     CPPI5_INFO0_HDESC_EPIB_PRESENT
+ *     CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ * @psdata_size: PSDATA size
+ *
+ * Initializes the descriptor header: type, flags and the PSDATA size
+ * field; next_desc is cleared.
+ */
+static inline void cppi5_hdesc_init(struct cppi5_host_desc_t *desc, u32 flags,
+                                   u32 psdata_size)
+{
+       desc->hdr.pkt_info0 = (CPPI5_INFO0_DESC_TYPE_VAL_HOST <<
+                              CPPI5_INFO0_HDESC_TYPE_SHIFT) | (flags);
+       desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+                               CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+                               CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+       desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_update_flags - Replace descriptor flags
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ *     CPPI5_INFO0_HDESC_EPIB_PRESENT
+ *     CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ */
+static inline void cppi5_hdesc_update_flags(struct cppi5_host_desc_t *desc,
+                                           u32 flags)
+{
+       desc->hdr.pkt_info0 &= ~(CPPI5_INFO0_HDESC_EPIB_PRESENT |
+                                CPPI5_INFO0_HDESC_PSINFO_LOCATION);
+       desc->hdr.pkt_info0 |= flags;
+}
+
+/**
+ * cppi5_hdesc_update_psdata_size - Replace PSdata size
+ * @desc: Host packet descriptor
+ * @psdata_size: PSDATA size
+ */
+static inline void
+cppi5_hdesc_update_psdata_size(struct cppi5_host_desc_t *desc, u32 psdata_size)
+{
+       desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+       desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+                               CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+                               CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_psdata_size - get PSdata size in bytes
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
+{
+       u32 psdata_size = 0;
+
+       if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+               psdata_size = (desc->hdr.pkt_info0 &
+                              CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+                              CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+       return (psdata_size << 2);
+}
+
+/**
+ * cppi5_hdesc_get_pktlen - get Packet Length from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Packet Length from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
+{
+       return (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_set_pktlen - set Packet Length in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_len: Packet Length
+ */
+static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
+                                         u32 pkt_len)
+{
+       desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PKTLEN_MASK;
+       desc->hdr.pkt_info0 |= (pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Protocol Specific Flags from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
+{
+       return (desc->hdr.pkt_info1 & CPPI5_INFO1_HDESC_PSFLGS_MASK) >>
+               CPPI5_INFO1_HDESC_PSFLGS_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc
+ * @desc: Host packet descriptor
+ * @ps_flags: Protocol Specific Flags
+ */
+static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
+                                          u32 ps_flags)
+{
+       desc->hdr.pkt_info1 &= ~CPPI5_INFO1_HDESC_PSFLGS_MASK;
+       desc->hdr.pkt_info1 |= (ps_flags <<
+                               CPPI5_INFO1_HDESC_PSFLGS_SHIFT) &
+                               CPPI5_INFO1_HDESC_PSFLGS_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_pkttype - get Packet Type from HDesc
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
+{
+       return (desc->hdr.pkt_info2 & CPPI5_INFO2_HDESC_PKTTYPE_MASK) >>
+               CPPI5_INFO2_HDESC_PKTTYPE_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_pkttype - set Packet Type in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_type: Packet Type
+ */
+static inline void cppi5_hdesc_set_pkttype(struct cppi5_host_desc_t *desc,
+                                          u32 pkt_type)
+{
+       desc->hdr.pkt_info2 &= ~CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+       desc->hdr.pkt_info2 |=
+                       (pkt_type << CPPI5_INFO2_HDESC_PKTTYPE_SHIFT) &
+                        CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+}
+
+/**
+ * cppi5_hdesc_attach_buf - attach buffer to HDesc
+ * @desc: Host packet descriptor
+ * @buf: Buffer physical address
+ * @buf_data_len: Buffer length
+ * @obuf: Original Buffer physical address
+ * @obuf_len: Original Buffer length
+ *
+ * Attaches buffer to Host Packet Descriptor
+ */
+static inline void cppi5_hdesc_attach_buf(struct cppi5_host_desc_t *desc,
+                                         dma_addr_t buf, u32 buf_data_len,
+                                         dma_addr_t obuf, u32 obuf_len)
+{
+       desc->buf_ptr = buf;
+       desc->buf_info1 = buf_data_len & CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK;
+       desc->org_buf_ptr = obuf;
+       desc->org_buf_len = obuf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
+
+static inline void cppi5_hdesc_get_obuf(struct cppi5_host_desc_t *desc,
+                                       dma_addr_t *obuf, u32 *obuf_len)
+{
+       *obuf = desc->org_buf_ptr;
+       *obuf_len = desc->org_buf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
+
+static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc)
+{
+       desc->buf_ptr = desc->org_buf_ptr;
+       desc->buf_info1 = desc->org_buf_len;
+}
+
+/**
+ * cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc
+ * @desc: Host Packet Descriptor
+ * @hbuf_desc: Host Buffer Descriptor physical address
+ *
+ * Adds and links a Host Buffer Descriptor to the HDesc.
+ */
+static inline void cppi5_hdesc_link_hbdesc(struct cppi5_host_desc_t *desc,
+                                          dma_addr_t hbuf_desc)
+{
+       desc->next_desc = hbuf_desc;
+}
+
+static inline dma_addr_t
+cppi5_hdesc_get_next_hbdesc(struct cppi5_host_desc_t *desc)
+{
+       return (dma_addr_t)desc->next_desc;
+}
+
+static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc)
+{
+       desc->hdr = (struct cppi5_desc_hdr_t) { 0 };
+       desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_epib_present -  check if EPIB present
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns true if EPIB present in the packet
+ */
+static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
+{
+       return !!(desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_EPIB_PRESENT);
+}
+
+/**
+ * cppi5_hdesc_get_psdata - Get pointer to PSDATA
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to the PSDATA in the HDesc, or
+ * NULL if ps_data is placed at the start of the data buffer.
+ */
+static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
+{
+       u32 psdata_size;
+       void *psdata;
+
+       if (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)
+               return NULL;
+
+       psdata_size = (desc->hdr.pkt_info0 &
+                      CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+                      CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+       if (!psdata_size)
+               return NULL;
+
+       psdata = &desc->epib;
+
+       if (cppi5_hdesc_epib_present(&desc->hdr))
+               psdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+       return psdata;
+}
+
+/**
+ * cppi5_hdesc_get_swdata - Get pointer to swdata
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to the SWDATA in the HDesc. NOTE: it is the
+ * caller's responsibility to ensure the hdesc actually has swdata.
+ */
+static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
+{
+       u32 psdata_size = 0;
+       void *swdata;
+
+       if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+               psdata_size = (desc->hdr.pkt_info0 &
+                              CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+                              CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+       swdata = &desc->epib;
+
+       if (cppi5_hdesc_epib_present(&desc->hdr))
+               swdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+       swdata += (psdata_size << 2);
+
+       return swdata;
+}
+
+/* ================================== TR ================================== */
+
+#define CPPI5_TR_TYPE_SHIFT                    (0U)
+#define CPPI5_TR_TYPE_MASK                     GENMASK(3, 0)
+#define CPPI5_TR_STATIC                                BIT(4)
+#define CPPI5_TR_WAIT                          BIT(5)
+#define CPPI5_TR_EVENT_SIZE_SHIFT              (6U)
+#define CPPI5_TR_EVENT_SIZE_MASK               GENMASK(7, 6)
+#define CPPI5_TR_TRIGGER0_SHIFT                        (8U)
+#define CPPI5_TR_TRIGGER0_MASK                 GENMASK(9, 8)
+#define CPPI5_TR_TRIGGER0_TYPE_SHIFT           (10U)
+#define CPPI5_TR_TRIGGER0_TYPE_MASK            GENMASK(11, 10)
+#define CPPI5_TR_TRIGGER1_SHIFT                        (12U)
+#define CPPI5_TR_TRIGGER1_MASK                 GENMASK(13, 12)
+#define CPPI5_TR_TRIGGER1_TYPE_SHIFT           (14U)
+#define CPPI5_TR_TRIGGER1_TYPE_MASK            GENMASK(15, 14)
+#define CPPI5_TR_CMD_ID_SHIFT                  (16U)
+#define CPPI5_TR_CMD_ID_MASK                   GENMASK(23, 16)
+#define CPPI5_TR_CSF_FLAGS_SHIFT               (24U)
+#define CPPI5_TR_CSF_FLAGS_MASK                        GENMASK(31, 24)
+#define   CPPI5_TR_CSF_SA_INDIRECT             BIT(0)
+#define   CPPI5_TR_CSF_DA_INDIRECT             BIT(1)
+#define   CPPI5_TR_CSF_SUPR_EVT                        BIT(2)
+#define   CPPI5_TR_CSF_EOL_ADV_SHIFT           (4U)
+#define   CPPI5_TR_CSF_EOL_ADV_MASK            GENMASK(6, 4)
+#define   CPPI5_TR_CSF_EOP                     BIT(7)
+
+/**
+ * enum cppi5_tr_types - TR types
+ * @CPPI5_TR_TYPE0:    One dimensional data move
+ * @CPPI5_TR_TYPE1:    Two dimensional data move
+ * @CPPI5_TR_TYPE2:    Three dimensional data move
+ * @CPPI5_TR_TYPE3:    Four dimensional data move
+ * @CPPI5_TR_TYPE4:    Four dimensional data move with data formatting
+ * @CPPI5_TR_TYPE5:    Four dimensional Cache Warm
+ * @CPPI5_TR_TYPE8:    Four Dimensional Block Move
+ * @CPPI5_TR_TYPE9:    Four Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE10:   Two Dimensional Block Move
+ * @CPPI5_TR_TYPE11:   Two Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE15:   Four Dimensional Block Move with Repacking and
+ *                     Indirection
+ */
+enum cppi5_tr_types {
+       CPPI5_TR_TYPE0 = 0,
+       CPPI5_TR_TYPE1,
+       CPPI5_TR_TYPE2,
+       CPPI5_TR_TYPE3,
+       CPPI5_TR_TYPE4,
+       CPPI5_TR_TYPE5,
+       /* type6-7: Reserved */
+       CPPI5_TR_TYPE8 = 8,
+       CPPI5_TR_TYPE9,
+       CPPI5_TR_TYPE10,
+       CPPI5_TR_TYPE11,
+       /* type12-14: Reserved */
+       CPPI5_TR_TYPE15 = 15,
+       CPPI5_TR_TYPE_MAX
+};
+
+/**
+ * enum cppi5_tr_event_size - TR Flags EVENT_SIZE field specifies when an event
+ *                           is generated for each TR.
+ * @CPPI5_TR_EVENT_SIZE_COMPLETION:    When TR is complete and all status for
+ *                                     the TR has been received
+ * @CPPI5_TR_EVENT_SIZE_ICNT1_DEC:     Type 0: when the last data transaction
+ *                                     is sent for the TR
+ *                                     Type 1-11: when ICNT1 is decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT2_DEC:     Type 0-1,10-11: when the last data
+ *                                     transaction is sent for the TR
+ *                                     All other types: when ICNT2 is
+ *                                     decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT3_DEC:     Type 0-2,10-11: when the last data
+ *                                     transaction is sent for the TR
+ *                                     All other types: when ICNT3 is
+ *                                     decremented
+ */
+enum cppi5_tr_event_size {
+       CPPI5_TR_EVENT_SIZE_COMPLETION,
+       CPPI5_TR_EVENT_SIZE_ICNT1_DEC,
+       CPPI5_TR_EVENT_SIZE_ICNT2_DEC,
+       CPPI5_TR_EVENT_SIZE_ICNT3_DEC,
+       CPPI5_TR_EVENT_SIZE_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger - TR Flags TRIGGERx field specifies the type of trigger
+ *                        used to enable the TR to transfer data as specified
+ *                        by TRIGGERx_TYPE field.
+ * @CPPI5_TR_TRIGGER_NONE:             No trigger
+ * @CPPI5_TR_TRIGGER_GLOBAL0:          Global trigger 0
+ * @CPPI5_TR_TRIGGER_GLOBAL1:          Global trigger 1
+ * @CPPI5_TR_TRIGGER_LOCAL_EVENT:      Local Event
+ */
+enum cppi5_tr_trigger {
+       CPPI5_TR_TRIGGER_NONE,
+       CPPI5_TR_TRIGGER_GLOBAL0,
+       CPPI5_TR_TRIGGER_GLOBAL1,
+       CPPI5_TR_TRIGGER_LOCAL_EVENT,
+       CPPI5_TR_TRIGGER_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger_type - TR Flags TRIGGERx_TYPE field specifies the type
+ *                             of data transfer that will be enabled by
+ *                             receiving a trigger as specified by TRIGGERx.
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC:   The second inner most loop (ICNT1) will
+ *                                     be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC:   The third inner most loop (ICNT2) will
+ *                                     be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC:   The outer most loop (ICNT3) will be
+ *                                     decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ALL:         The entire TR will be allowed to
+ *                                     complete
+ */
+enum cppi5_tr_trigger_type {
+       CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC,
+       CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+       CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC,
+       CPPI5_TR_TRIGGER_TYPE_ALL,
+       CPPI5_TR_TRIGGER_TYPE_MAX
+};
+
+typedef u32 cppi5_tr_flags_t;
+
+/**
+ * struct cppi5_tr_type0_t - Type 0 (One dimensional data move) TR (16 byte)
+ * @flags:             TR flags (type, triggers, event, configuration)
+ * @icnt0:             Total loop iteration count for level 0 (innermost)
+ * @_reserved:         Not used
+ * @addr:              Starting address for the source data or destination data
+ */
+struct cppi5_tr_type0_t {
+       cppi5_tr_flags_t flags;
+       u16 icnt0;
+       u16 _reserved;
+       u64 addr;
+} __aligned(16) __packed;
+
+/**
+ * struct cppi5_tr_type1_t - Type 1 (Two dimensional data move) TR (32 byte)
+ * @flags:             TR flags (type, triggers, event, configuration)
+ * @icnt0:             Total loop iteration count for level 0 (innermost)
+ * @icnt1:             Total loop iteration count for level 1
+ * @addr:              Starting address for the source data or destination data
+ * @dim1:              Signed dimension for loop level 1
+ */
+struct cppi5_tr_type1_t {
+       cppi5_tr_flags_t flags;
+       u16 icnt0;
+       u16 icnt1;
+       u64 addr;
+       s32 dim1;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type2_t - Type 2 (Three dimensional data move) TR (32 byte)
+ * @flags:             TR flags (type, triggers, event, configuration)
+ * @icnt0:             Total loop iteration count for level 0 (innermost)
+ * @icnt1:             Total loop iteration count for level 1
+ * @addr:              Starting address for the source data or destination data
+ * @dim1:              Signed dimension for loop level 1
+ * @icnt2:             Total loop iteration count for level 2
+ * @_reserved:         Not used
+ * @dim2:              Signed dimension for loop level 2
+ */
+struct cppi5_tr_type2_t {
+       cppi5_tr_flags_t flags;
+       u16 icnt0;
+       u16 icnt1;
+       u64 addr;
+       s32 dim1;
+       u16 icnt2;
+       u16 _reserved;
+       s32 dim2;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type3_t - Type 3 (Four dimensional data move) TR (32 byte)
+ * @flags:             TR flags (type, triggers, event, configuration)
+ * @icnt0:             Total loop iteration count for level 0 (innermost)
+ * @icnt1:             Total loop iteration count for level 1
+ * @addr:              Starting address for the source data or destination data
+ * @dim1:              Signed dimension for loop level 1
+ * @icnt2:             Total loop iteration count for level 2
+ * @icnt3:             Total loop iteration count for level 3 (outermost)
+ * @dim2:              Signed dimension for loop level 2
+ * @dim3:              Signed dimension for loop level 3
+ */
+struct cppi5_tr_type3_t {
+       cppi5_tr_flags_t flags;
+       u16 icnt0;
+       u16 icnt1;
+       u64 addr;
+       s32 dim1;
+       u16 icnt2;
+       u16 icnt3;
+       s32 dim2;
+       s32 dim3;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type15_t - Type 15 (Four Dimensional Block Copy with
+ *                           Repacking and Indirection Support) TR (64 byte)
+ * @flags:             TR flags (type, triggers, event, configuration)
+ * @icnt0:             Total loop iteration count for level 0 (innermost) for
+ *                     source
+ * @icnt1:             Total loop iteration count for level 1 for source
+ * @addr:              Starting address for the source data
+ * @dim1:              Signed dimension for loop level 1 for source
+ * @icnt2:             Total loop iteration count for level 2 for source
+ * @icnt3:             Total loop iteration count for level 3 (outermost) for
+ *                     source
+ * @dim2:              Signed dimension for loop level 2 for source
+ * @dim3:              Signed dimension for loop level 3 for source
+ * @_reserved:         Not used
+ * @ddim1:             Signed dimension for loop level 1 for destination
+ * @daddr:             Starting address for the destination data
+ * @ddim2:             Signed dimension for loop level 2 for destination
+ * @ddim3:             Signed dimension for loop level 3 for destination
+ * @dicnt0:            Total loop iteration count for level 0 (innermost) for
+ *                     destination
+ * @dicnt1:            Total loop iteration count for level 1 for destination
+ * @dicnt2:            Total loop iteration count for level 2 for destination
+ * @dicnt3:            Total loop iteration count for level 3 (outermost) for
+ *                     destination
+ */
+struct cppi5_tr_type15_t {
+       cppi5_tr_flags_t flags;
+       u16 icnt0;
+       u16 icnt1;
+       u64 addr;
+       s32 dim1;
+       u16 icnt2;
+       u16 icnt3;
+       s32 dim2;
+       s32 dim3;
+       u32 _reserved;
+       s32 ddim1;
+       u64 daddr;
+       s32 ddim2;
+       s32 ddim3;
+       u16 dicnt0;
+       u16 dicnt1;
+       u16 dicnt2;
+       u16 dicnt3;
+} __aligned(64) __packed;
+
+/**
+ * struct cppi5_tr_resp_t - TR response record
+ * @status:            Status type and info
+ * @_reserved:         Not used
+ * @cmd_id:            Command ID for the TR for TR identification
+ * @flags:             Configuration Specific Flags
+ */
+struct cppi5_tr_resp_t {
+       u8 status;
+       u8 _reserved;
+       u8 cmd_id;
+       u8 flags;
+} __packed;
+
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_SHIFT    (0U)
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_MASK     GENMASK(3, 0)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_SHIFT    (4U)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_MASK     GENMASK(7, 4)
+#define CPPI5_TR_RESPONSE_CMDID_SHIFT          (16U)
+#define CPPI5_TR_RESPONSE_CMDID_MASK           GENMASK(23, 16)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_SHIFT   (24U)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_MASK    GENMASK(31, 24)
+
+/**
+ * enum cppi5_tr_resp_status_type - TR Response Status Type field is used to
+ *                                 determine what type of status is being
+ *                                 returned.
+ * @CPPI5_TR_RESPONSE_STATUS_NONE:             No error, completion: completed
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR:     Transfer Error, completion: none
+ *                                             or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR:      Aborted Error, completion: none
+ *                                             or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR:   Submission Error, completion:
+ *                                             none
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR:  Unsupported Error, completion:
+ *                                             none
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION: Transfer Exception, completion:
+ *                                             partially completed
+ * @CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH:  Teardown Flush, completion: none
+ */
+enum cppi5_tr_resp_status_type {
+       CPPI5_TR_RESPONSE_STATUS_NONE,
+       CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR,
+       CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR,
+       CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR,
+       CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION,
+       CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH,
+       CPPI5_TR_RESPONSE_STATUS_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_submission - TR Response Status field values which
+ *                                       correspond to Submission Error
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0: ICNT0 was 0
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL: Channel FIFO was full when TR
+ *                                             received
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN:   Channel is not owned by the
+ *                                             submitter
+ */
+enum cppi5_tr_resp_status_submission {
+       CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0,
+       CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL,
+       CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN,
+       CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_unsupported - TR Response Status field values which
+ *                                        correspond to Unsupported Error
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE:      TR Type not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC:       STATIC not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL:          EOL not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC: CONFIGURATION SPECIFIC
+ *                                                     not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE:                AMODE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE:       ELTYPE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT:         DFMT not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR:                SECTR not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC: AMODE SPECIFIC field
+ *                                                     not supported
+ */
+enum cppi5_tr_resp_status_unsupported {
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX
+};
+
+/**
+ * cppi5_trdesc_calc_size - Calculate TR Descriptor size
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ *
+ * Returns required TR Descriptor size
+ */
+static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
+{
+       /*
+        * The size of a TR descriptor is:
+        * 1 x tr_size : the first 16 bytes are used by the packet info block +
+        * tr_count x tr_size : Transfer Request Records +
+        * tr_count x sizeof(struct cppi5_tr_resp_t) : Transfer Response Records
+        */
+       return tr_size * (tr_count + 1) +
+               sizeof(struct cppi5_tr_resp_t) * tr_count;
+}
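
As a quick check of this arithmetic (a hypothetical sketch, not part of the
patch; dev and desc_dma are assumed to exist in the caller), a descriptor
carrying two 64-byte TRs needs 64 * (2 + 1) + 4 * 2 = 200 bytes:

	size_t desc_size;
	void *desc;

	desc_size = cppi5_trdesc_calc_size(2, 64);	/* 200 bytes */
	desc = dma_alloc_coherent(dev, desc_size, &desc_dma, GFP_KERNEL);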
+
+/**
+ * cppi5_trdesc_init - Init TR Descriptor
+ * @desc_hdr: TR Descriptor header
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ * @reload_idx: Absolute index to jump to on the 2nd and following passes
+ *             through the TR packet.
+ * @reload_count: Number of times to jump from last entry to reload_idx. 0x1ff
+ *               indicates infinite looping.
+ *
+ * Init TR Descriptor
+ */
+static inline void cppi5_trdesc_init(struct cppi5_desc_hdr_t *desc_hdr,
+                                    u32 tr_count, u32 tr_size, u32 reload_idx,
+                                    u32 reload_count)
+{
+       desc_hdr->pkt_info0 = CPPI5_INFO0_DESC_TYPE_VAL_TR <<
+                             CPPI5_INFO0_HDESC_TYPE_SHIFT;
+       desc_hdr->pkt_info0 |=
+                       (reload_count << CPPI5_INFO0_TRDESC_RLDCNT_SHIFT) &
+                       CPPI5_INFO0_TRDESC_RLDCNT_MASK;
+       desc_hdr->pkt_info0 |=
+                       (reload_idx << CPPI5_INFO0_TRDESC_RLDIDX_SHIFT) &
+                       CPPI5_INFO0_TRDESC_RLDIDX_MASK;
+       desc_hdr->pkt_info0 |= (tr_count - 1) & CPPI5_INFO0_TRDESC_LASTIDX_MASK;
+
+       desc_hdr->pkt_info1 |= ((ffs(tr_size >> 4) - 1) <<
+                               CPPI5_INFO1_TRDESC_RECSIZE_SHIFT) &
+                               CPPI5_INFO1_TRDESC_RECSIZE_MASK;
+}
+
+/**
+ * cppi5_tr_init - Init TR record
+ * @flags: Pointer to the TR's flags
+ * @type: TR type
+ * @static_tr: TR is static
+ * @wait: Wait for TR completion before allowing the next TR to start
+ * @event_size: output event generation cfg
+ * @cmd_id: TR identifier (application specifics)
+ *
+ * Init TR record
+ */
+static inline void cppi5_tr_init(cppi5_tr_flags_t *flags,
+                                enum cppi5_tr_types type, bool static_tr,
+                                bool wait, enum cppi5_tr_event_size event_size,
+                                u32 cmd_id)
+{
+       *flags = type;
+       *flags |= (event_size << CPPI5_TR_EVENT_SIZE_SHIFT) &
+                 CPPI5_TR_EVENT_SIZE_MASK;
+
+       *flags |= (cmd_id << CPPI5_TR_CMD_ID_SHIFT) &
+                 CPPI5_TR_CMD_ID_MASK;
+
+       if (static_tr && (type == CPPI5_TR_TYPE8 || type == CPPI5_TR_TYPE9))
+               *flags |= CPPI5_TR_STATIC;
+
+       if (wait)
+               *flags |= CPPI5_TR_WAIT;
+}
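
A minimal sketch tying the two init helpers together, continuing the
hypothetical allocation above (struct cppi5_tr_type1_t and
CPPI5_TR_EVENT_SIZE_COMPLETION are assumed from earlier in this header):

	struct cppi5_desc_hdr_t *desc_hdr = desc;
	struct cppi5_tr_type1_t *tr = desc + 16;	/* TRs follow the info block */

	/* one 16-byte TR, jump back to index 0 forever (reload_count 0x1ff) */
	cppi5_trdesc_init(desc_hdr, 1, 16, 0, 0x1ff);
	cppi5_tr_init(&tr->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);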
+
+/**
+ * cppi5_tr_set_trigger - Configure trigger0/1 and trigger0/1_type
+ * @flags: Pointer to the TR's flags
+ * @trigger0: trigger0 selection
+ * @trigger0_type: type of data transfer that will be enabled by trigger0
+ * @trigger1: trigger1 selection
+ * @trigger1_type: type of data transfer that will be enabled by trigger1
+ *
+ * Configure the triggers for the TR
+ */
+static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags,
+               enum cppi5_tr_trigger trigger0,
+               enum cppi5_tr_trigger_type trigger0_type,
+               enum cppi5_tr_trigger trigger1,
+               enum cppi5_tr_trigger_type trigger1_type)
+{
+       *flags &= ~(CPPI5_TR_TRIGGER0_MASK | CPPI5_TR_TRIGGER0_TYPE_MASK |
+                   CPPI5_TR_TRIGGER1_MASK | CPPI5_TR_TRIGGER1_TYPE_MASK);
+       *flags |= (trigger0 << CPPI5_TR_TRIGGER0_SHIFT) &
+                 CPPI5_TR_TRIGGER0_MASK;
+       *flags |= (trigger0_type << CPPI5_TR_TRIGGER0_TYPE_SHIFT) &
+                 CPPI5_TR_TRIGGER0_TYPE_MASK;
+
+       *flags |= (trigger1 << CPPI5_TR_TRIGGER1_SHIFT) &
+                 CPPI5_TR_TRIGGER1_MASK;
+       *flags |= (trigger1_type << CPPI5_TR_TRIGGER1_TYPE_SHIFT) &
+                 CPPI5_TR_TRIGGER1_TYPE_MASK;
+}
+
+/**
+ * cppi5_tr_csf_set - Update the Configuration Specific Flags
+ * @flags: Pointer to the TR's flags
+ * @csf: Configuration specific flags
+ *
+ * Set a bit in Configuration Specific Flags section of the TR flags.
+ */
+static inline void cppi5_tr_csf_set(cppi5_tr_flags_t *flags, u32 csf)
+{
+       *flags &= ~CPPI5_TR_CSF_FLAGS_MASK;
+       *flags |= (csf << CPPI5_TR_CSF_FLAGS_SHIFT) &
+                 CPPI5_TR_CSF_FLAGS_MASK;
+}
+
+#endif /* __TI_CPPI5_H__ */
index dad4a68..64461fc 100644 (file)
@@ -219,6 +219,62 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
  * @bytes_transferred: byte counter
  */
 
+/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ *  client driver and it is attached (via the dmaengine_desc_attach_metadata()
+ *  helper) to the descriptor.
+ *
+ * Client drivers interested in using this mode can follow these steps:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*) and construct the
+ *     metadata in the client's buffer
+ *   2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ *     descriptor
+ *   3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *   2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ *     descriptor
+ *   3. submit the transfer
+ *   4. when the transfer is completed, the metadata should be available in the
+ *     attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ *  driver. The client driver can ask for the pointer, maximum size and the
+ *  currently used size of the metadata and can directly update or read it.
+ *  dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
+ *  provided as helper functions.
+ *
+ *  Note: the metadata area for the descriptor is no longer valid after the
+ *  transfer has been completed (valid up to the point when the completion
+ *  callback returns if used).
+ *
+ * Client drivers interested in using this mode can follow these steps:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *   2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ *     metadata area
+ *   3. update the metadata at the pointer
+ *   4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ *     of data the client has placed into the metadata buffer
+ *   5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *   2. submit the transfer
+ *   3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ *     pointer to the engine's metadata area
+ *   4. Read out the metadata from the pointer
+ *
+ * Note: the two modes are not compatible and clients must use only one mode
+ * per descriptor; a client-mode usage sketch follows the enum below.
+ */
+enum dma_desc_metadata_mode {
+       DESC_METADATA_NONE = 0,
+       DESC_METADATA_CLIENT = BIT(0),
+       DESC_METADATA_ENGINE = BIT(1),
+};
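
A condensed DESC_METADATA_CLIENT sketch for the DMA_MEM_TO_DEV steps above
(hypothetical; chan, buf, len, md_buf and md_len belong to the client):

	struct dma_async_tx_descriptor *desc;
	int ret;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	/* the metadata was constructed in md_buf by the client (step 1) */
	ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);	/* step 2 */
	if (ret)
		return ret;
	dmaengine_submit(desc);						/* step 3 */
	dma_async_issue_pending(chan);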
+
 struct dma_chan_percpu {
        /* stats */
        unsigned long memcpy_count;
@@ -238,10 +294,12 @@ struct dma_router {
 /**
  * struct dma_chan - devices supply DMA channels, clients use them
  * @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
  * @cookie: last cookie value returned to client
  * @completed_cookie: last completed cookie for this channel
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
+ * @name: backlink name for sysfs
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client_count: how many clients are using this channel
@@ -252,12 +310,14 @@ struct dma_router {
  */
 struct dma_chan {
        struct dma_device *device;
+       struct device *slave;
        dma_cookie_t cookie;
        dma_cookie_t completed_cookie;
 
        /* sysfs */
        int chan_id;
        struct dma_chan_dev *dev;
+       const char *name;
 
        struct list_head device_node;
        struct dma_chan_percpu __percpu *local;
@@ -475,19 +535,36 @@ struct dmaengine_unmap_data {
        dma_addr_t addr[0];
 };
 
+struct dma_async_tx_descriptor;
+
+struct dma_descriptor_metadata_ops {
+       int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+                     size_t len);
+
+       void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+                        size_t *payload_len, size_t *max_len);
+       int (*set_len)(struct dma_async_tx_descriptor *desc,
+                      size_t payload_len);
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
  * @cookie: tracking cookie for this transaction, set to -EBUSY if
  *     this tx is sitting on a dependency list
  * @flags: flags to augment operation preparation, control completion, and
- *     communicate status
+ *     communicate status
  * @phys: physical address of the descriptor
  * @chan: target channel for this operation
  * @tx_submit: accept the descriptor, assign ordered cookie and mark the
  * descriptor pending. To be pushed on .issue_pending() call
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
+ * @desc_metadata_mode: core managed metadata mode to protect mixed use of
+ *     DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
+ *     DESC_METADATA_NONE
+ * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
+ *     DMA driver if metadata mode is supported with the descriptor
  * ---async_tx api specific fields---
  * @next: at completion submit this descriptor
  * @parent: pointer to the next level up in the dependency chain
@@ -504,6 +581,8 @@ struct dma_async_tx_descriptor {
        dma_async_tx_callback_result callback_result;
        void *callback_param;
        struct dmaengine_unmap_data *unmap;
+       enum dma_desc_metadata_mode desc_metadata_mode;
+       struct dma_descriptor_metadata_ops *metadata_ops;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        struct dma_async_tx_descriptor *next;
        struct dma_async_tx_descriptor *parent;
@@ -611,11 +690,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
  * @residue: the remaining number of bytes left to transmit
  *     on the selected transfer for states DMA_IN_PROGRESS and
  *     DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data in bytes cached by the DMA.
  */
 struct dma_tx_state {
        dma_cookie_t last;
        dma_cookie_t used;
        u32 residue;
+       u32 in_flight_bytes;
 };
 
 /**
@@ -666,6 +747,7 @@ struct dma_filter {
  * @global_node: list_head for global dma_device_list
  * @filter: information for device/slave to filter function/param mapping
  * @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
  * @max_xor: maximum number of xor sources, 0 if no capability
  * @max_pq: maximum number of PQ sources and PQ-continue capability
  * @copy_align: alignment shift for memcpy operations
@@ -674,6 +756,7 @@ struct dma_filter {
  * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
+ * @owner: owner module (automatically set based on the provided dev)
  * @src_addr_widths: bit mask of src addr widths the device supports
  *     Width is specified in bytes, e.g. for a device supporting
  *     a width of 4 the mask should have BIT(4) set.
@@ -718,15 +801,21 @@ struct dma_filter {
  *     will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
  * @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @device_release: called sometime after dma_async_device_unregister() is
+ *     called and there are no further references to this structure. This
+ *     must be implemented to free resources; however, many existing drivers
+ *     do not and are therefore not safe to unbind while in use.
+ *
  */
 struct dma_device {
-
+       struct kref ref;
        unsigned int chancnt;
        unsigned int privatecnt;
        struct list_head channels;
        struct list_head global_node;
        struct dma_filter filter;
        dma_cap_mask_t  cap_mask;
+       enum dma_desc_metadata_mode desc_metadata_modes;
        unsigned short max_xor;
        unsigned short max_pq;
        enum dmaengine_alignment copy_align;
@@ -737,6 +826,7 @@ struct dma_device {
 
        int dev_id;
        struct device *dev;
+       struct module *owner;
 
        u32 src_addr_widths;
        u32 dst_addr_widths;
@@ -800,6 +890,7 @@ struct dma_device {
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *txstate);
        void (*device_issue_pending)(struct dma_chan *chan);
+       void (*device_release)(struct dma_device *dev);
 };
 
 static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -902,6 +993,41 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
                                                    len, flags);
 }
 
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+               enum dma_desc_metadata_mode mode)
+{
+       if (!chan)
+               return false;
+
+       return !!(chan->device->desc_metadata_modes & mode);
+}
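
For example, a client could gate its metadata path on this helper before
attempting an attach (hypothetical sketch):

	if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT))
		return -ENOTSUPP;	/* fall back to a plain transfer */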
+
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+                                  void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+                                     size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+                                   size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+               struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+       return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+               struct dma_async_tx_descriptor *desc, size_t *payload_len,
+               size_t *max_len)
+{
+       return NULL;
+}
+static inline int dmaengine_desc_set_metadata_len(
+               struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_DMA_ENGINE */
+
 /**
  * dmaengine_terminate_all() - Terminate all active DMA transfers
  * @chan: The channel for which to terminate the transfers
@@ -1402,16 +1528,16 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
 int dma_async_device_register(struct dma_device *device);
 int dmaenginem_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+                                     struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+                                        struct dma_chan *chan);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
-struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
 #define dma_request_channel(mask, x, y) \
        __dma_request_channel(&(mask), x, y, NULL)
-#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
-       __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
 
 static inline struct dma_chan
-*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
                                  dma_filter_fn fn, void *fn_param,
                                  struct device *dev, const char *name)
 {
@@ -1424,6 +1550,25 @@ static inline struct dma_chan
        if (!fn || !fn_param)
                return NULL;
 
-       return __dma_request_channel(mask, fn, fn_param, NULL);
+       return __dma_request_channel(&mask, fn, fn_param, NULL);
+}
+
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+       switch (dir) {
+       case DMA_DEV_TO_MEM:
+               return "DEV_TO_MEM";
+       case DMA_MEM_TO_DEV:
+               return "MEM_TO_DEV";
+       case DMA_MEM_TO_MEM:
+               return "MEM_TO_MEM";
+       case DMA_DEV_TO_DEV:
+               return "DEV_TO_DEV";
+       default:
+               break;
+       }
+
+       return "invalid";
 }
 #endif /* DMAENGINE_H */
index 8bb6302..ea4c133 100644 (file)
@@ -245,6 +245,18 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk)
                !(disk->flags & GENHD_FL_NO_PART_SCAN);
 }
 
+static inline bool disk_has_partitions(struct gendisk *disk)
+{
+       bool ret = false;
+
+       rcu_read_lock();
+       if (rcu_dereference(disk->part_tbl)->len > 1)
+               ret = true;
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static inline dev_t disk_devt(struct gendisk *disk)
 {
        return MKDEV(disk->major, disk->first_minor);
index 5215fdb..bf2d017 100644 (file)
@@ -158,6 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
 
 int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
 int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
+void gpiod_toggle_active_low(struct gpio_desc *desc);
 
 int gpiod_is_active_low(const struct gpio_desc *desc);
 int gpiod_cansleep(const struct gpio_desc *desc);
@@ -483,6 +484,12 @@ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
        return -ENOSYS;
 }
 
+static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+       /* GPIO can never have been requested */
+       WARN_ON(desc);
+}
+
 static inline int gpiod_is_active_low(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
index 7257916..5e609f2 100644 (file)
@@ -27,6 +27,7 @@ enum hwmon_sensor_types {
        hwmon_humidity,
        hwmon_fan,
        hwmon_pwm,
+       hwmon_intrusion,
        hwmon_max,
 };
 
@@ -59,7 +60,8 @@ enum hwmon_chip_attributes {
 #define HWMON_C_TEMP_SAMPLES           BIT(hwmon_chip_temp_samples)
 
 enum hwmon_temp_attributes {
-       hwmon_temp_input = 0,
+       hwmon_temp_enable,
+       hwmon_temp_input,
        hwmon_temp_type,
        hwmon_temp_lcrit,
        hwmon_temp_lcrit_hyst,
@@ -85,6 +87,7 @@ enum hwmon_temp_attributes {
        hwmon_temp_reset_history,
 };
 
+#define HWMON_T_ENABLE         BIT(hwmon_temp_enable)
 #define HWMON_T_INPUT          BIT(hwmon_temp_input)
 #define HWMON_T_TYPE           BIT(hwmon_temp_type)
 #define HWMON_T_LCRIT          BIT(hwmon_temp_lcrit)
@@ -111,6 +114,7 @@ enum hwmon_temp_attributes {
 #define HWMON_T_RESET_HISTORY  BIT(hwmon_temp_reset_history)
 
 enum hwmon_in_attributes {
+       hwmon_in_enable,
        hwmon_in_input,
        hwmon_in_min,
        hwmon_in_max,
@@ -126,9 +130,9 @@ enum hwmon_in_attributes {
        hwmon_in_max_alarm,
        hwmon_in_lcrit_alarm,
        hwmon_in_crit_alarm,
-       hwmon_in_enable,
 };
 
+#define HWMON_I_ENABLE         BIT(hwmon_in_enable)
 #define HWMON_I_INPUT          BIT(hwmon_in_input)
 #define HWMON_I_MIN            BIT(hwmon_in_min)
 #define HWMON_I_MAX            BIT(hwmon_in_max)
@@ -144,9 +148,9 @@ enum hwmon_in_attributes {
 #define HWMON_I_MAX_ALARM      BIT(hwmon_in_max_alarm)
 #define HWMON_I_LCRIT_ALARM    BIT(hwmon_in_lcrit_alarm)
 #define HWMON_I_CRIT_ALARM     BIT(hwmon_in_crit_alarm)
-#define HWMON_I_ENABLE         BIT(hwmon_in_enable)
 
 enum hwmon_curr_attributes {
+       hwmon_curr_enable,
        hwmon_curr_input,
        hwmon_curr_min,
        hwmon_curr_max,
@@ -164,6 +168,7 @@ enum hwmon_curr_attributes {
        hwmon_curr_crit_alarm,
 };
 
+#define HWMON_C_ENABLE         BIT(hwmon_curr_enable)
 #define HWMON_C_INPUT          BIT(hwmon_curr_input)
 #define HWMON_C_MIN            BIT(hwmon_curr_min)
 #define HWMON_C_MAX            BIT(hwmon_curr_max)
@@ -181,6 +186,7 @@ enum hwmon_curr_attributes {
 #define HWMON_C_CRIT_ALARM     BIT(hwmon_curr_crit_alarm)
 
 enum hwmon_power_attributes {
+       hwmon_power_enable,
        hwmon_power_average,
        hwmon_power_average_interval,
        hwmon_power_average_interval_max,
@@ -211,6 +217,7 @@ enum hwmon_power_attributes {
        hwmon_power_crit_alarm,
 };
 
+#define HWMON_P_ENABLE                 BIT(hwmon_power_enable)
 #define HWMON_P_AVERAGE                        BIT(hwmon_power_average)
 #define HWMON_P_AVERAGE_INTERVAL       BIT(hwmon_power_average_interval)
 #define HWMON_P_AVERAGE_INTERVAL_MAX   BIT(hwmon_power_average_interval_max)
@@ -241,14 +248,17 @@ enum hwmon_power_attributes {
 #define HWMON_P_CRIT_ALARM             BIT(hwmon_power_crit_alarm)
 
 enum hwmon_energy_attributes {
+       hwmon_energy_enable,
        hwmon_energy_input,
        hwmon_energy_label,
 };
 
+#define HWMON_E_ENABLE                 BIT(hwmon_energy_enable)
 #define HWMON_E_INPUT                  BIT(hwmon_energy_input)
 #define HWMON_E_LABEL                  BIT(hwmon_energy_label)
 
 enum hwmon_humidity_attributes {
+       hwmon_humidity_enable,
        hwmon_humidity_input,
        hwmon_humidity_label,
        hwmon_humidity_min,
@@ -259,6 +269,7 @@ enum hwmon_humidity_attributes {
        hwmon_humidity_fault,
 };
 
+#define HWMON_H_ENABLE                 BIT(hwmon_humidity_enable)
 #define HWMON_H_INPUT                  BIT(hwmon_humidity_input)
 #define HWMON_H_LABEL                  BIT(hwmon_humidity_label)
 #define HWMON_H_MIN                    BIT(hwmon_humidity_min)
@@ -269,6 +280,7 @@ enum hwmon_humidity_attributes {
 #define HWMON_H_FAULT                  BIT(hwmon_humidity_fault)
 
 enum hwmon_fan_attributes {
+       hwmon_fan_enable,
        hwmon_fan_input,
        hwmon_fan_label,
        hwmon_fan_min,
@@ -282,6 +294,7 @@ enum hwmon_fan_attributes {
        hwmon_fan_fault,
 };
 
+#define HWMON_F_ENABLE                 BIT(hwmon_fan_enable)
 #define HWMON_F_INPUT                  BIT(hwmon_fan_input)
 #define HWMON_F_LABEL                  BIT(hwmon_fan_label)
 #define HWMON_F_MIN                    BIT(hwmon_fan_min)
@@ -306,6 +319,13 @@ enum hwmon_pwm_attributes {
 #define HWMON_PWM_MODE                 BIT(hwmon_pwm_mode)
 #define HWMON_PWM_FREQ                 BIT(hwmon_pwm_freq)
 
+enum hwmon_intrusion_attributes {
+       hwmon_intrusion_alarm,
+       hwmon_intrusion_beep,
+};
+#define HWMON_INTRUSION_ALARM          BIT(hwmon_intrusion_alarm)
+#define HWMON_INTRUSION_BEEP           BIT(hwmon_intrusion_beep)
+
 /**
  * struct hwmon_ops - hwmon device operations
  * @is_visible: Callback to return attribute visibility. Mandatory.
index 739b7bf..8ba0424 100644 (file)
@@ -79,9 +79,6 @@
 /* Some controllers have a CBSY bit */
 #define TMIO_MMC_HAVE_CBSY             BIT(11)
 
-/* Some controllers that support HS400 use 4 taps while others use 8. */
-#define TMIO_MMC_HAVE_4TAP_HS400       BIT(13)
-
 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
 int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
 void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
index 0de3d7c..4ae2f29 100644 (file)
@@ -17,10 +17,9 @@ int mmc_gpio_get_ro(struct mmc_host *host);
 int mmc_gpio_get_cd(struct mmc_host *host);
 int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
                         unsigned int idx, bool override_active_level,
-                        unsigned int debounce, bool *gpio_invert);
+                        unsigned int debounce);
 int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
-                        unsigned int idx,
-                        unsigned int debounce, bool *gpio_invert);
+                        unsigned int idx, unsigned int debounce);
 void mmc_gpio_set_cd_isr(struct mmc_host *host,
                         irqreturn_t (*isr)(int irq, void *dev_id));
 int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
index ae5e260..cac56fb 100644 (file)
@@ -3698,6 +3698,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
 int dev_get_alias(const struct net_device *, char *, size_t);
 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 int __dev_set_mtu(struct net_device *, int);
+int dev_validate_mtu(struct net_device *dev, int mtu,
+                    struct netlink_ext_ack *extack);
 int dev_set_mtu_ext(struct net_device *dev, int mtu,
                    struct netlink_ext_ack *extack);
 int dev_set_mtu(struct net_device *, int);
index 4d8b1ea..908d38d 100644 (file)
@@ -426,13 +426,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
               sizeof(*addr));
 }
 
-/* Calculate the bytes required to store the inclusive range of a-b */
-static inline int
-bitmap_bytes(u32 a, u32 b)
-{
-       return 4 * ((((b - a + 8) / 8) + 3) / 4);
-}
-
 /* How often should the gc be run by default */
 #define IPSET_GC_TIME                  (3 * 60)
 
index cf09ab3..851425c 100644 (file)
@@ -31,7 +31,7 @@ struct nfnetlink_subsystem {
        const struct nfnl_callback *cb; /* callback for individual types */
        struct module *owner;
        int (*commit)(struct net *net, struct sk_buff *skb);
-       int (*abort)(struct net *net, struct sk_buff *skb);
+       int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
        void (*cleanup)(struct net *net);
        bool (*valid_genid)(struct net *net, u32 genid);
 };
index 2302d13..352c0d7 100644 (file)
 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_19H_DF_F3    0x1653
 #define PCI_DEVICE_ID_AMD_CNB17H_F3    0x1703
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
index 7f8c7d9..019fecd 100644 (file)
@@ -40,6 +40,7 @@ extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
 
 extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
 extern void devm_pinctrl_put(struct pinctrl *p);
+extern int pinctrl_select_default_state(struct device *dev);
 
 #ifdef CONFIG_PM
 extern int pinctrl_pm_select_default_state(struct device *dev);
@@ -122,6 +123,11 @@ static inline void devm_pinctrl_put(struct pinctrl *p)
 {
 }
 
+static inline int pinctrl_select_default_state(struct device *dev)
+{
+       return 0;
+}
+
 static inline int pinctrl_pm_select_default_state(struct device *dev)
 {
        return 0;
index 6d54fe3..b8da8ae 100644 (file)
@@ -101,6 +101,7 @@ struct mlxreg_core_data {
  * @aggr_mask: group aggregation mask;
  * @reg: group interrupt status register;
  * @mask: group interrupt mask;
+ * @capability: group capability register;
  * @cache: last status value for elements from the same group;
  * @count: number of available elements in the group;
  * @ind: element's index inside the group;
@@ -112,6 +113,7 @@ struct mlxreg_core_item {
        u32 aggr_mask;
        u32 reg;
        u32 mask;
+       u32 capability;
        u32 cache;
        u8 count;
        u8 ind;
index 60249e2..d39fc65 100644 (file)
@@ -58,6 +58,7 @@
 #define ASUS_WMI_DEVID_LIGHT_SENSOR    0x00050022 /* ?? */
 #define ASUS_WMI_DEVID_LIGHTBAR                0x00050025
 #define ASUS_WMI_DEVID_FAN_BOOST_MODE  0x00110018
+#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
 
 /* Misc */
 #define ASUS_WMI_DEVID_CAMERA          0x00060013
index 08468fc..1ea5bae 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _PMBUS_H_
 #define _PMBUS_H_
 
+#include <linux/bits.h>
+
 /* flags */
 
 /*
  * communication errors for no explicable reason. For such chips, checking
  * the status register must be disabled.
  */
-#define PMBUS_SKIP_STATUS_CHECK        (1 << 0)
+#define PMBUS_SKIP_STATUS_CHECK        BIT(0)
+
+/*
+ * PMBUS_WRITE_PROTECTED
+ * Set if the chip is write protected and write protection is not determined
+ * by the standard WRITE_PROTECT command.
+ */
+#define PMBUS_WRITE_PROTECTED  BIT(1)
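
A board file would then pass the flag through the platform data declared just
below, e.g. (hypothetical chip name):

	static struct pmbus_platform_data foo_pmbus_pdata = {
		.flags = PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK,
	};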
 
 struct pmbus_platform_data {
        u32 flags;              /* Device specific flags */
index dfe493a..f0a092a 100644 (file)
@@ -144,6 +144,51 @@ struct reg_sequence {
        __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
 })
 
+/**
+ * regmap_read_poll_timeout_atomic - Poll until a condition is met or a timeout occurs
+ *
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops).
+ *            Should be less than ~10us since udelay is used
+ *            (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of a read error. In the two former cases,
+ * the last read value at @addr is stored in @val.
+ *
+ * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h.
+ *
+ * Note: In general regmap cannot be used in atomic context. If you want to use
+ * this macro then first setup your regmap for atomic use (flat or no cache
+ * and MMIO regmap).
+ */
+#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
+({ \
+       u64 __timeout_us = (timeout_us); \
+       unsigned long __delay_us = (delay_us); \
+       ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+       int __ret; \
+       for (;;) { \
+               __ret = regmap_read((map), (addr), &(val)); \
+               if (__ret) \
+                       break; \
+               if (cond) \
+                       break; \
+               if ((__timeout_us) && \
+                   ktime_compare(ktime_get(), __timeout) > 0) { \
+                       __ret = regmap_read((map), (addr), &(val)); \
+                       break; \
+               } \
+               if (__delay_us) \
+                       udelay(__delay_us); \
+       } \
+       __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
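
Hypothetical usage, assuming a flat or MMIO regmap and made-up FOO_STATUS /
FOO_READY definitions: poll up to 100 us for a ready bit, with 5 us between
reads:

	unsigned int sts;
	int ret;

	ret = regmap_read_poll_timeout_atomic(map, FOO_STATUS, sts,
					      sts & FOO_READY, 5, 100);
	if (ret)
		return ret;	/* -ETIMEDOUT or the regmap_read error */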
+
 /**
  * regmap_field_read_poll_timeout - Poll until a condition is met or timeout
  *
diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h
new file mode 100644 (file)
index 0000000..26f73df
--- /dev/null
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * K3 Ring Accelerator (RA) subsystem interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __SOC_TI_K3_RINGACC_API_H_
+#define __SOC_TI_K3_RINGACC_API_H_
+
+#include <linux/types.h>
+
+struct device_node;
+
+/**
+ * enum k3_ring_mode - &struct k3_ring_cfg mode
+ *
+ * RA ring operational modes
+ *
+ * @K3_RINGACC_RING_MODE_RING: Exposed Ring mode for SW direct access
+ * @K3_RINGACC_RING_MODE_MESSAGE: Messaging mode. Messaging mode requires
+ *     that all accesses to the queue must go through this IP so that all
+ *     accesses to the memory are controlled and ordered. This IP then
+ *     controls the entire state of the queue, and SW has no direct control,
+ *     such as through doorbells, and cannot access the storage memory directly.
+ *     This is particularly useful when more than one SW or HW entity can be
+ *     the producer and/or consumer at the same time
+ * @K3_RINGACC_RING_MODE_CREDENTIALS: Credentials mode is message mode plus
+ *     stores credentials with each message, requiring the element size to be
+ *     doubled to fit the credentials. Any exposed memory should be protected
+ *     by a firewall from unwanted access
+ */
+enum k3_ring_mode {
+       K3_RINGACC_RING_MODE_RING = 0,
+       K3_RINGACC_RING_MODE_MESSAGE,
+       K3_RINGACC_RING_MODE_CREDENTIALS,
+       K3_RINGACC_RING_MODE_INVALID
+};
+
+/**
+ * enum k3_ring_size - &struct k3_ring_cfg elm_size
+ *
+ * RA ring element's sizes in bytes.
+ */
+enum k3_ring_size {
+       K3_RINGACC_RING_ELSIZE_4 = 0,
+       K3_RINGACC_RING_ELSIZE_8,
+       K3_RINGACC_RING_ELSIZE_16,
+       K3_RINGACC_RING_ELSIZE_32,
+       K3_RINGACC_RING_ELSIZE_64,
+       K3_RINGACC_RING_ELSIZE_128,
+       K3_RINGACC_RING_ELSIZE_256,
+       K3_RINGACC_RING_ELSIZE_INVALID
+};
+
+struct k3_ringacc;
+struct k3_ring;
+
+/**
+ * struct k3_ring_cfg - RA ring configuration structure
+ *
+ * @size: Ring size, number of elements
+ * @elm_size: Ring element size
+ * @mode: Ring operational mode
+ * @flags: Ring configuration flags. Possible values:
+ *      @K3_RINGACC_RING_SHARED: when set, allows the same ring to be
+ *      requested several times. Useful when the same ring is used as a Free
+ *      Host PD ring for different flows, for example.
+ *      Note: Locking should be done by consumer if required
+ */
+struct k3_ring_cfg {
+       u32 size;
+       enum k3_ring_size elm_size;
+       enum k3_ring_mode mode;
+#define K3_RINGACC_RING_SHARED BIT(1)
+       u32 flags;
+};
+
+#define K3_RINGACC_RING_ID_ANY (-1)
+
+/**
+ * of_k3_ringacc_get_by_phandle - find a RA by phandle property
+ * @np: device node
+ * @property: property name containing phandle on RA node
+ *
+ * Returns a pointer to the RA (struct k3_ringacc),
+ * -ENODEV if not found,
+ * or -EPROBE_DEFER if not yet registered
+ */
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+                                               const char *property);
+
+#define K3_RINGACC_RING_USE_PROXY BIT(1)
+
+/**
+ * k3_ringacc_request_ring - request ring from ringacc
+ * @ringacc: pointer to the ringacc
+ * @id: ring id or K3_RINGACC_RING_ID_ANY for any general purpose ring
+ * @flags:
+ *     @K3_RINGACC_RING_USE_PROXY: if set, a proxy will be allocated and
+ *             used to access ring memory. Supported only for rings in
+ *             Message/Credentials/Queue mode.
+ *
+ * Returns a pointer to the ring (struct k3_ring)
+ * or NULL in case of failure.
+ */
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+                                       int id, u32 flags);
+
+/**
+ * k3_ringacc_ring_reset - ring reset
+ * @ring: pointer to the ring
+ *
+ * Resets ring internal state ((hw)occ, (hw)idx).
+ */
+void k3_ringacc_ring_reset(struct k3_ring *ring);
+/**
+ * k3_ringacc_ring_reset_dma - ring reset for DMA rings
+ * @ring: pointer to the ring
+ * @occ: ring occupancy (number of elements) to take into account during reset
+ *
+ * Resets ring internal state ((hw)occ, (hw)idx). Should be used for rings
+ * which are read by K3 UDMA, like TX or Free Host PD rings.
+ */
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ);
+
+/**
+ * k3_ringacc_ring_free - ring free
+ * @ring: pointer to the ring
+ *
+ * Resets the ring and frees all allocated resources.
+ */
+int k3_ringacc_ring_free(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_get_ring_id - Get the Ring ID
+ * @ring: pointer to the ring
+ *
+ * Returns the Ring ID
+ */
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_get_ring_irq_num - Get the irq number for the ring
+ * @ring: pointer to the ring
+ *
+ * Returns the interrupt number which can be used to request the interrupt
+ */
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_cfg - ring configure
+ * @ring: pointer to the ring
+ * @cfg: Ring configuration parameters (see &struct k3_ring_cfg)
+ *
+ * Configures ring, including ring memory allocation.
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg);
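
Putting the calls above together, a hypothetical consumer (device node,
property name and ring geometry invented for illustration) would look up the
RA, request a general purpose ring and configure it:

	struct k3_ring_cfg cfg = {
		.size = 128,
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
	};
	struct k3_ringacc *ringacc;
	struct k3_ring *ring;
	int ret;

	ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	if (IS_ERR(ringacc))
		return PTR_ERR(ringacc);

	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENODEV;

	ret = k3_ringacc_ring_cfg(ring, &cfg);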
+
+/**
+ * k3_ringacc_ring_get_size - get ring size
+ * @ring: pointer to the ring
+ *
+ * Returns ring size in number of elements.
+ */
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_get_free - get free elements
+ * @ring: pointer to the ring
+ *
+ * Returns number of free elements in the ring.
+ */
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_get_occ - get ring occupancy
+ * @ring: pointer to the ring
+ *
+ * Returns total number of valid entries on the ring
+ */
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_is_full - checks if ring is full
+ * @ring: pointer to the ring
+ *
+ * Returns true if the ring is full
+ */
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_push - push element to the ring tail
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Push one ring element to the ring tail. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_pop - pop element from the ring head
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Pop one ring element from the ring head. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem);
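
With the 8-byte element geometry from the earlier sketch, moving one element
through the ring is then (hypothetical):

	u64 elem = 0xdeadbeefcafef00dULL;

	ret = k3_ringacc_ring_push(ring, &elem);	/* enqueue at the tail */
	if (!ret)
		ret = k3_ringacc_ring_pop(ring, &elem);	/* dequeue from the head */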
+
+/**
+ * k3_ringacc_ring_push_head - push element to the ring head
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Push one ring element to the ring head. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ * Not Supported by ring modes: K3_RINGACC_RING_MODE_RING
+ */
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_pop_tail - pop element from the ring tail
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Pop one ring element from the ring tail. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ * Not Supported by ring modes: K3_RINGACC_RING_MODE_RING
+ */
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring);
+
+#endif /* __SOC_TI_K3_RINGACC_API_H_ */
index 3a67a7e..6d16ba0 100644 (file)
@@ -423,6 +423,12 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *     GPIO descriptors rather than using global GPIO numbers grabbed by the
  *     driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
  *     and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
+ * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
+ *     fill in this field with the first unused native CS, to be used by SPI
+ *     controller drivers that need to drive a native CS when using GPIO CS.
+ * @max_native_cs: When cs_gpiods is used, and this field is filled in,
+ *     spi_register_controller() will validate all native CS (including the
+ *     unused native CS) against this value.
  * @statistics: statistics for the spi_controller
  * @dma_tx: DMA transmit channel
  * @dma_rx: DMA receive channel
@@ -624,6 +630,8 @@ struct spi_controller {
        int                     *cs_gpios;
        struct gpio_desc        **cs_gpiods;
        bool                    use_gpio_descriptors;
+       u8                      unused_native_cs;
+       u8                      max_native_cs;
 
        /* statistics */
        struct spi_statistics   statistics;
index a3ecf2f..284872a 100644 (file)
@@ -6,16 +6,12 @@
  * struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI
  * @freq:      input clock freq to the core.
  * @baudwidth: baud rate divider width of the core.
- * @gpio_cs_count:     number of gpio pins used for chipselect.
- * @gpio_cs:   array of gpio pins used for chipselect.
  *
  * freq and baudwidth are used only if the divider is programmable.
  */
 struct tiny_spi_platform_data {
        unsigned int freq;
        unsigned int baudwidth;
-       unsigned int gpio_cs_count;
-       int *gpio_cs;
 };
 
 #endif /* _LINUX_SPI_SPI_OC_TINY_H */
index 86eecbd..f73e177 100644 (file)
@@ -416,6 +416,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
        return xa->xa_flags & XA_FLAGS_MARK(mark);
 }
 
+/**
+ * xa_for_each_range() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ * @last: Last index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index.  You may modify @index during the iteration if you
+ * want to skip or reprocess indices.  It is safe to modify the array
+ * during the iteration.  At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to @last.
+ *
+ * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n).  You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_range() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_range().
+ *
+ * Context: Any context.  Takes and releases the RCU lock.
+ */
+#define xa_for_each_range(xa, index, entry, start, last)               \
+       for (index = start,                                             \
+            entry = xa_find(xa, &index, last, XA_PRESENT);             \
+            entry;                                                     \
+            entry = xa_find_after(xa, &index, last, XA_PRESENT))
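
Hypothetical usage: visit only the entries stored at indices 16 through 63 of
an XArray:

	unsigned long index;
	void *entry;

	xa_for_each_range(&array, index, entry, 16, 63)
		pr_info("index %lu: entry %p\n", index, entry);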
+
 /**
  * xa_for_each_start() - Iterate over a portion of an XArray.
  * @xa: XArray.
@@ -439,11 +469,8 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
  *
  * Context: Any context.  Takes and releases the RCU lock.
  */
-#define xa_for_each_start(xa, index, entry, start)                     \
-       for (index = start,                                             \
-            entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);        \
-            entry;                                                     \
-            entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+#define xa_for_each_start(xa, index, entry, start) \
+       xa_for_each_range(xa, index, entry, start, ULONG_MAX)
 
 /**
  * xa_for_each() - Iterate over present entries in an XArray.
@@ -508,6 +535,14 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
                                spin_lock_irqsave(&(xa)->xa_lock, flags)
 #define xa_unlock_irqrestore(xa, flags) \
                                spin_unlock_irqrestore(&(xa)->xa_lock, flags)
+#define xa_lock_nested(xa, subclass) \
+                               spin_lock_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_bh_nested(xa, subclass) \
+                               spin_lock_bh_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irq_nested(xa, subclass) \
+                               spin_lock_irq_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irqsave_nested(xa, flags, subclass) \
+               spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
 
 /*
  * Versions of the normal API which require the caller to hold the
index 286fd96..a1a8d45 100644 (file)
@@ -7,6 +7,7 @@
 struct netns_nftables {
        struct list_head        tables;
        struct list_head        commit_list;
+       struct list_head        module_list;
        struct mutex            commit_mutex;
        unsigned int            base_seq;
        u8                      gencursor;
index 9a0e8af..a5ccfa6 100644 (file)
@@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback,
            TP_PROTO(xen_mc_callback_fn_t fn, void *data),
            TP_ARGS(fn, data),
            TP_STRUCT__entry(
-                   __field(xen_mc_callback_fn_t, fn)
+                   /*
+                    * Use field_struct to avoid is_signed_type()
+                    * comparison of a function pointer.
+                    */
+                   __field_struct(xen_mc_callback_fn_t, fn)
                    __field(void *, data)
                    ),
            TP_fast_assign(
index c160a53..f94f65d 100644 (file)
@@ -11,6 +11,8 @@
 #define PROT_WRITE     0x2             /* page can be written */
 #define PROT_EXEC      0x4             /* page can be executed */
 #define PROT_SEM       0x8             /* page may be used for atomic ops */
+/*                     0x10               reserved for arch-specific use */
+/*                     0x20               reserved for arch-specific use */
 #define PROT_NONE      0x0             /* page can not be accessed */
 #define PROT_GROWSDOWN 0x01000000      /* mprotect flag: extend change to start of growsdown vma */
 #define PROT_GROWSUP   0x02000000      /* mprotect flag: extend change to end of growsup vma */
index 98e2c49..4913539 100644 (file)
@@ -39,6 +39,7 @@ struct hidraw_devinfo {
 /* The first byte of SFEATURE and GFEATURE is the report number */
 #define HIDIOCSFEATURE(len)    _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
 #define HIDIOCGFEATURE(len)    _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
+#define HIDIOCGRAWUNIQ(len)     _IOC(_IOC_READ, 'H', 0x08, len)
 
 #define HIDRAW_FIRST_MINOR 0
 #define HIDRAW_MAX_DEVICES 64
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
new file mode 100644 (file)
index 0000000..849ef15
--- /dev/null
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _USR_IDXD_H_
+#define _USR_IDXD_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/* Descriptor flags */
+#define IDXD_OP_FLAG_FENCE     0x0001
+#define IDXD_OP_FLAG_BOF       0x0002
+#define IDXD_OP_FLAG_CRAV      0x0004
+#define IDXD_OP_FLAG_RCR       0x0008
+#define IDXD_OP_FLAG_RCI       0x0010
+#define IDXD_OP_FLAG_CRSTS     0x0020
+#define IDXD_OP_FLAG_CR                0x0080
+#define IDXD_OP_FLAG_CC                0x0100
+#define IDXD_OP_FLAG_ADDR1_TCS 0x0200
+#define IDXD_OP_FLAG_ADDR2_TCS 0x0400
+#define IDXD_OP_FLAG_ADDR3_TCS 0x0800
+#define IDXD_OP_FLAG_CR_TCS    0x1000
+#define IDXD_OP_FLAG_STORD     0x2000
+#define IDXD_OP_FLAG_DRDBK     0x4000
+#define IDXD_OP_FLAG_DSTS      0x8000
+
+/* Opcode */
+enum dsa_opcode {
+       DSA_OPCODE_NOOP = 0,
+       DSA_OPCODE_BATCH,
+       DSA_OPCODE_DRAIN,
+       DSA_OPCODE_MEMMOVE,
+       DSA_OPCODE_MEMFILL,
+       DSA_OPCODE_COMPARE,
+       DSA_OPCODE_COMPVAL,
+       DSA_OPCODE_CR_DELTA,
+       DSA_OPCODE_AP_DELTA,
+       DSA_OPCODE_DUALCAST,
+       DSA_OPCODE_CRCGEN = 0x10,
+       DSA_OPCODE_COPY_CRC,
+       DSA_OPCODE_DIF_CHECK,
+       DSA_OPCODE_DIF_INS,
+       DSA_OPCODE_DIF_STRP,
+       DSA_OPCODE_DIF_UPDT,
+       DSA_OPCODE_CFLUSH = 0x20,
+};
+
+/* Completion record status */
+enum dsa_completion_status {
+       DSA_COMP_NONE = 0,
+       DSA_COMP_SUCCESS,
+       DSA_COMP_SUCCESS_PRED,
+       DSA_COMP_PAGE_FAULT_NOBOF,
+       DSA_COMP_PAGE_FAULT_IR,
+       DSA_COMP_BATCH_FAIL,
+       DSA_COMP_BATCH_PAGE_FAULT,
+       DSA_COMP_DR_OFFSET_NOINC,
+       DSA_COMP_DR_OFFSET_ERANGE,
+       DSA_COMP_DIF_ERR,
+       DSA_COMP_BAD_OPCODE = 0x10,
+       DSA_COMP_INVALID_FLAGS,
+       DSA_COMP_NOZERO_RESERVE,
+       DSA_COMP_XFER_ERANGE,
+       DSA_COMP_DESC_CNT_ERANGE,
+       DSA_COMP_DR_ERANGE,
+       DSA_COMP_OVERLAP_BUFFERS,
+       DSA_COMP_DCAST_ERR,
+       DSA_COMP_DESCLIST_ALIGN,
+       DSA_COMP_INT_HANDLE_INVAL,
+       DSA_COMP_CRA_XLAT,
+       DSA_COMP_CRA_ALIGN,
+       DSA_COMP_ADDR_ALIGN,
+       DSA_COMP_PRIV_BAD,
+       DSA_COMP_TRAFFIC_CLASS_CONF,
+       DSA_COMP_PFAULT_RDBA,
+       DSA_COMP_HW_ERR1,
+       DSA_COMP_HW_ERR_DRB,
+       DSA_COMP_TRANSLATION_FAIL,
+};
+
+#define DSA_COMP_STATUS_MASK           0x7f
+#define DSA_COMP_STATUS_WRITE          0x80
+
+struct dsa_batch_desc {
+       uint32_t        pasid:20;
+       uint32_t        rsvd:11;
+       uint32_t        priv:1;
+       uint32_t        flags:24;
+       uint32_t        opcode:8;
+       uint64_t        completion_addr;
+       uint64_t        desc_list_addr;
+       uint64_t        rsvd1;
+       uint32_t        desc_count;
+       uint16_t        interrupt_handle;
+       uint16_t        rsvd2;
+       uint8_t         rsvd3[24];
+} __attribute__((packed));
+
+struct dsa_hw_desc {
+       uint32_t        pasid:20;
+       uint32_t        rsvd:11;
+       uint32_t        priv:1;
+       uint32_t        flags:24;
+       uint32_t        opcode:8;
+       uint64_t        completion_addr;
+       union {
+               uint64_t        src_addr;
+               uint64_t        rdback_addr;
+               uint64_t        pattern;
+       };
+       union {
+               uint64_t        dst_addr;
+               uint64_t        rdback_addr2;
+               uint64_t        src2_addr;
+               uint64_t        comp_pattern;
+       };
+       uint32_t        xfer_size;
+       uint16_t        int_handle;
+       uint16_t        rsvd1;
+       union {
+               uint8_t         expected_res;
+               struct {
+                       uint64_t        delta_addr;
+                       uint32_t        max_delta_size;
+               };
+               uint32_t        delta_rec_size;
+               uint64_t        dest2;
+               /* CRC */
+               struct {
+                       uint32_t        crc_seed;
+                       uint32_t        crc_rsvd;
+                       uint64_t        seed_addr;
+               };
+               /* DIF check or strip */
+               struct {
+                       uint8_t         src_dif_flags;
+                       uint8_t         dif_chk_res;
+                       uint8_t         dif_chk_flags;
+                       uint8_t         dif_chk_res2[5];
+                       uint32_t        chk_ref_tag_seed;
+                       uint16_t        chk_app_tag_mask;
+                       uint16_t        chk_app_tag_seed;
+               };
+               /* DIF insert */
+               struct {
+                       uint8_t         dif_ins_res;
+                       uint8_t         dest_dif_flag;
+                       uint8_t         dif_ins_flags;
+                       uint8_t         dif_ins_res2[13];
+                       uint32_t        ins_ref_tag_seed;
+                       uint16_t        ins_app_tag_mask;
+                       uint16_t        ins_app_tag_seed;
+               };
+               /* DIF update */
+               struct {
+                       uint8_t         src_upd_flags;
+                       uint8_t         upd_dest_flags;
+                       uint8_t         dif_upd_flags;
+                       uint8_t         dif_upd_res[5];
+                       uint32_t        src_ref_tag_seed;
+                       uint16_t        src_app_tag_mask;
+                       uint16_t        src_app_tag_seed;
+                       uint32_t        dest_ref_tag_seed;
+                       uint16_t        dest_app_tag_mask;
+                       uint16_t        dest_app_tag_seed;
+               };
+
+               uint8_t         op_specific[24];
+       };
+} __attribute__((packed));
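
A hypothetical memmove submission built from the fields above (the addresses
and the actual submission path to the device portal are assumed, not shown):

	struct dsa_hw_desc hw = {};

	hw.opcode = DSA_OPCODE_MEMMOVE;
	hw.flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR; /* ask for a completion record */
	hw.src_addr = src;
	hw.dst_addr = dst;
	hw.xfer_size = len;
	hw.completion_addr = comp_addr;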
+
+struct dsa_raw_desc {
+       uint64_t        field[8];
+} __attribute__((packed));
+
+/*
+ * The status field will be modified by hardware, therefore it should be
+ * volatile to prevent the compiler from optimizing out the read.
+ */
+struct dsa_completion_record {
+       volatile uint8_t        status;
+       union {
+               uint8_t         result;
+               uint8_t         dif_status;
+       };
+       uint16_t                rsvd;
+       uint32_t                bytes_completed;
+       uint64_t                fault_addr;
+       union {
+               uint16_t        delta_rec_size;
+               uint16_t        crc_val;
+
+               /* DIF check & strip */
+               struct {
+                       uint32_t        dif_chk_ref_tag;
+                       uint16_t        dif_chk_app_tag_mask;
+                       uint16_t        dif_chk_app_tag;
+               };
+
+               /* DIF insert */
+               struct {
+                       uint64_t        dif_ins_res;
+                       uint32_t        dif_ins_ref_tag;
+                       uint16_t        dif_ins_app_tag_mask;
+                       uint16_t        dif_ins_app_tag;
+               };
+
+               /* DIF update */
+               struct {
+                       uint32_t        dif_upd_src_ref_tag;
+                       uint16_t        dif_upd_src_app_tag_mask;
+                       uint16_t        dif_upd_src_app_tag;
+                       uint32_t        dif_upd_dest_ref_tag;
+                       uint16_t        dif_upd_dest_app_tag_mask;
+                       uint16_t        dif_upd_dest_app_tag;
+               };
+
+               uint8_t         op_specific[16];
+       };
+} __attribute__((packed));
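
Because status is volatile, a caller can poll the record directly; a
hypothetical busy-wait against the record programmed into completion_addr:

	struct dsa_completion_record *comp = comp_va;	/* assumed mapping */

	while ((comp->status & DSA_COMP_STATUS_MASK) == DSA_COMP_NONE)
		cpu_relax();

	if ((comp->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
		return -EIO;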
+
+struct dsa_raw_completion_record {
+       uint64_t        field[4];
+} __attribute__((packed));
+
+#endif
index a3300e1..55cfcb7 100644 (file)
@@ -178,7 +178,8 @@ struct io_uring_params {
 
 struct io_uring_files_update {
        __u32 offset;
-       __s32 *fds;
+       __u32 resv;
+       __aligned_u64 /* __s32 * */ fds;
 };
 
 #endif
index bc933c0..f977786 100644 (file)
@@ -159,6 +159,10 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 
        kimage_terminate(image);
 
+       ret = machine_kexec_post_load(image);
+       if (ret)
+               goto out;
+
        /* Install the new kernel and uninstall the old */
        image = xchg(dest_image, image);
 
index 15d70a9..c19c0da 100644 (file)
@@ -589,6 +589,12 @@ static void kimage_free_extra_pages(struct kimage *image)
        kimage_free_page_list(&image->unusable_pages);
 
 }
+
+int __weak machine_kexec_post_load(struct kimage *image)
+{
+       return 0;
+}
+
 void kimage_terminate(struct kimage *image)
 {
        if (*image->entry != 0)
@@ -1171,7 +1177,7 @@ int kernel_kexec(void)
                 * CPU hotplug again; so re-enable it here.
                 */
                cpu_hotplug_enable();
-               pr_emerg("Starting new kernel\n");
+               pr_notice("Starting new kernel\n");
                machine_shutdown();
        }
 
index a2df939..faa74d5 100644 (file)
@@ -441,6 +441,10 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
 
        kimage_terminate(image);
 
+       ret = machine_kexec_post_load(image);
+       if (ret)
+               goto out;
+
        /*
         * Free up any temporary buffers allocated which are not needed
         * after image has been loaded
index 48aaf2a..39d30cc 100644 (file)
@@ -13,6 +13,8 @@ void kimage_terminate(struct kimage *image);
 int kimage_is_destination_range(struct kimage *image,
                                unsigned long start, unsigned long end);
 
+int machine_kexec_post_load(struct kimage *image);
+
 extern struct mutex kexec_mutex;
 
 #ifdef CONFIG_KEXEC_FILE
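
Both the kexec_load and kexec_file_load paths now invoke
machine_kexec_post_load() right after kimage_terminate(), with the __weak
stub above as the fallback. A sketch, not taken from this merge, of how an
architecture would override it at link time:

/* Hypothetical arch-side definition; being a strong symbol, it
 * replaces the __weak default when linked in.
 */
int machine_kexec_post_load(struct kimage *image)
{
        /* e.g. fix up the control page or flush caches for the
         * freshly loaded segments before the image can be armed
         */
        return 0;
}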
index 26b9168..d65f2d5 100644 (file)
@@ -1147,24 +1147,24 @@ void free_basic_memory_bitmaps(void)
 
 void clear_free_pages(void)
 {
-#ifdef CONFIG_PAGE_POISONING_ZERO
        struct memory_bitmap *bm = free_pages_map;
        unsigned long pfn;
 
        if (WARN_ON(!(free_pages_map)))
                return;
 
-       memory_bm_position_reset(bm);
-       pfn = memory_bm_next_pfn(bm);
-       while (pfn != BM_END_OF_MAP) {
-               if (pfn_valid(pfn))
-                       clear_highpage(pfn_to_page(pfn));
-
+       if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
+               memory_bm_position_reset(bm);
                pfn = memory_bm_next_pfn(bm);
+               while (pfn != BM_END_OF_MAP) {
+                       if (pfn_valid(pfn))
+                               clear_highpage(pfn_to_page(pfn));
+
+                       pfn = memory_bm_next_pfn(bm);
+               }
+               memory_bm_position_reset(bm);
+               pr_info("free pages cleared after restore\n");
        }
-       memory_bm_position_reset(bm);
-       pr_info("free pages cleared after restore\n");
-#endif /* PAGE_POISONING_ZERO */
 }
 
 /**
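
The hibernation hunk trades a file-scope #ifdef for a branch the compiler can
still eliminate. A minimal illustration of the idiom, with a hypothetical
clear_pages() helper standing in for the bitmap walk:

/* IS_ENABLED() expands to a compile-time 0 or 1, so the dead
 * branch is still discarded when the option is off, yet it can be
 * OR-ed with want_init_on_free(), a boot-time switch that a
 * preprocessor conditional could never see.
 */
if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free())
        clear_pages();  /* hypothetical helper */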
index ddb7e7f..5b6ee4a 100644 (file)
@@ -9420,6 +9420,11 @@ __init static int tracing_set_default_clock(void)
 {
        /* sched_clock_stable() is determined in late_initcall */
        if (!trace_boot_clock && !sched_clock_stable()) {
+               if (security_locked_down(LOCKDOWN_TRACEFS)) {
+                       pr_warn("Can not set tracing clock due to lockdown\n");
+                       return -EPERM;
+               }
+
                printk(KERN_WARNING
                       "Unstable clock detected, switching default tracing clock to \"global\"\n"
                       "If you want to keep using the local clock, then add:\n"
index f62de5f..6ac35b9 100644 (file)
@@ -116,6 +116,7 @@ struct hist_field {
        struct ftrace_event_field       *field;
        unsigned long                   flags;
        hist_field_fn_t                 fn;
+       unsigned int                    ref;
        unsigned int                    size;
        unsigned int                    offset;
        unsigned int                    is_signed;
@@ -1766,11 +1767,13 @@ static struct hist_field *find_var(struct hist_trigger_data *hist_data,
        struct event_trigger_data *test;
        struct hist_field *hist_field;
 
+       lockdep_assert_held(&event_mutex);
+
        hist_field = find_var_field(hist_data, var_name);
        if (hist_field)
                return hist_field;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        test_data = test->private_data;
                        hist_field = find_var_field(test_data, var_name);
@@ -1820,7 +1823,9 @@ static struct hist_field *find_file_var(struct trace_event_file *file,
        struct event_trigger_data *test;
        struct hist_field *hist_field;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        test_data = test->private_data;
                        hist_field = find_var_field(test_data, var_name);
@@ -2423,8 +2428,16 @@ static int contains_operator(char *str)
        return field_op;
 }
 
+static void get_hist_field(struct hist_field *hist_field)
+{
+       hist_field->ref++;
+}
+
 static void __destroy_hist_field(struct hist_field *hist_field)
 {
+       if (--hist_field->ref > 1)
+               return;
+
        kfree(hist_field->var.name);
        kfree(hist_field->name);
        kfree(hist_field->type);
@@ -2466,6 +2479,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
        if (!hist_field)
                return NULL;
 
+       hist_field->ref = 1;
+
        hist_field->hist_data = hist_data;
 
        if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
@@ -2661,6 +2676,17 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
 {
        unsigned long flags = HIST_FIELD_FL_VAR_REF;
        struct hist_field *ref_field;
+       int i;
+
+       /* Check if the variable already exists */
+       for (i = 0; i < hist_data->n_var_refs; i++) {
+               ref_field = hist_data->var_refs[i];
+               if (ref_field->var.idx == var_field->var.idx &&
+                   ref_field->var.hist_data == var_field->hist_data) {
+                       get_hist_field(ref_field);
+                       return ref_field;
+               }
+       }
 
        ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
        if (ref_field) {
@@ -3115,7 +3141,9 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data,
 {
        struct event_trigger_data *test;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (test->private_data == hist_data)
                                return test->filter_str;
@@ -3166,9 +3194,11 @@ find_compatible_hist(struct hist_trigger_data *target_hist_data,
        struct event_trigger_data *test;
        unsigned int n_keys;
 
+       lockdep_assert_held(&event_mutex);
+
        n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        hist_data = test->private_data;
 
@@ -5528,7 +5558,7 @@ static int hist_show(struct seq_file *m, void *v)
                goto out_unlock;
        }
 
-       list_for_each_entry_rcu(data, &event_file->triggers, list) {
+       list_for_each_entry(data, &event_file->triggers, list) {
                if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
                        hist_trigger_show(m, data, n++);
        }
@@ -5921,7 +5951,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
        if (hist_data->attrs->name && !named_data)
                goto new;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@ -6005,10 +6037,12 @@ static bool have_hist_trigger_match(struct event_trigger_data *data,
        struct event_trigger_data *test, *named_data = NULL;
        bool match = false;
 
+       lockdep_assert_held(&event_mutex);
+
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (hist_trigger_match(data, test, named_data, false)) {
                                match = true;
@@ -6026,10 +6060,12 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data,
        struct hist_trigger_data *hist_data = data->private_data;
        struct event_trigger_data *test, *named_data = NULL;
 
+       lockdep_assert_held(&event_mutex);
+
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@ -6051,10 +6087,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
        struct event_trigger_data *test, *named_data = NULL;
        bool unregistered = false;
 
+       lockdep_assert_held(&event_mutex);
+
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@ -6080,7 +6118,9 @@ static bool hist_file_check_refs(struct trace_event_file *file)
        struct hist_trigger_data *hist_data;
        struct event_trigger_data *test;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        hist_data = test->private_data;
                        if (check_var_refs(hist_data))
@@ -6323,7 +6363,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec,
        struct enable_trigger_data *enable_data = data->private_data;
        struct event_trigger_data *test;
 
-       list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
+       list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
+                               lockdep_is_held(&event_mutex)) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (enable_data->enable)
                                test->paused = false;
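
The pattern running through these trace-event hunks: traversals that always
execute with event_mutex held drop the RCU iterator in favour of plain
list_for_each_entry() plus a lockdep assertion, while hist_enable_trigger()
above, which may run under either protection, keeps the RCU form and passes
the newer lockdep condition argument. The two shapes side by side (schematic
bodies):

/* mutex-only path: assert once, iterate without RCU */
lockdep_assert_held(&event_mutex);
list_for_each_entry(test, &file->triggers, list) {
        /* match or mutate triggers here */
}

/* mixed path: legal under rcu_read_lock() or the mutex; the
 * fourth argument tells lockdep which lock also makes it safe
 */
list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
                        lockdep_is_held(&event_mutex)) {
        /* read-only inspection here */
}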
index 2cd53ca..40106ff 100644 (file)
@@ -501,7 +501,9 @@ void update_cond_flag(struct trace_event_file *file)
        struct event_trigger_data *data;
        bool set_cond = false;
 
-       list_for_each_entry_rcu(data, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(data, &file->triggers, list) {
                if (data->filter || event_command_post_trigger(data->cmd_ops) ||
                    event_command_needs_rec(data->cmd_ops)) {
                        set_cond = true;
@@ -536,7 +538,9 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
        struct event_trigger_data *test;
        int ret = 0;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
                        ret = -EEXIST;
                        goto out;
@@ -581,7 +585,9 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
        struct event_trigger_data *data;
        bool unregistered = false;
 
-       list_for_each_entry_rcu(data, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(data, &file->triggers, list) {
                if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
                        unregistered = true;
                        list_del_rcu(&data->list);
@@ -1497,7 +1503,9 @@ int event_enable_register_trigger(char *glob,
        struct event_trigger_data *test;
        int ret = 0;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                test_enable_data = test->private_data;
                if (test_enable_data &&
                    (test->cmd_ops->trigger_type ==
@@ -1537,7 +1545,9 @@ void event_enable_unregister_trigger(char *glob,
        struct event_trigger_data *data;
        bool unregistered = false;
 
-       list_for_each_entry_rcu(data, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(data, &file->triggers, list) {
                enable_data = data->private_data;
                if (enable_data &&
                    (data->cmd_ops->trigger_type ==
index 7f89026..3f54dc2 100644 (file)
@@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
        INIT_HLIST_NODE(&tk->rp.kp.hlist);
        INIT_LIST_HEAD(&tk->rp.kp.list);
 
-       ret = trace_probe_init(&tk->tp, event, group);
+       ret = trace_probe_init(&tk->tp, event, group, false);
        if (ret < 0)
                goto error;
 
index 905b10a..9ae87be 100644 (file)
@@ -984,15 +984,19 @@ void trace_probe_cleanup(struct trace_probe *tp)
 }
 
 int trace_probe_init(struct trace_probe *tp, const char *event,
-                    const char *group)
+                    const char *group, bool alloc_filter)
 {
        struct trace_event_call *call;
+       size_t size = sizeof(struct trace_probe_event);
        int ret = 0;
 
        if (!event || !group)
                return -EINVAL;
 
-       tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
+       if (alloc_filter)
+               size += sizeof(struct trace_uprobe_filter);
+
+       tp->event = kzalloc(size, GFP_KERNEL);
        if (!tp->event)
                return -ENOMEM;
 
index 4ee7037..a0ff9e2 100644 (file)
@@ -223,6 +223,12 @@ struct probe_arg {
        const struct fetch_type *type;  /* Type of this argument */
 };
 
+struct trace_uprobe_filter {
+       rwlock_t                rwlock;
+       int                     nr_systemwide;
+       struct list_head        perf_events;
+};
+
 /* Event call and class holder */
 struct trace_probe_event {
        unsigned int                    flags;  /* For TP_FLAG_* */
@@ -230,6 +236,7 @@ struct trace_probe_event {
        struct trace_event_call         call;
        struct list_head                files;
        struct list_head                probes;
+       struct trace_uprobe_filter      filter[0];
 };
 
 struct trace_probe {
@@ -322,7 +329,7 @@ static inline bool trace_probe_has_single_file(struct trace_probe *tp)
 }
 
 int trace_probe_init(struct trace_probe *tp, const char *event,
-                    const char *group);
+                    const char *group, bool alloc_filter);
 void trace_probe_cleanup(struct trace_probe *tp);
 int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
 void trace_probe_unlink(struct trace_probe *tp);
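
With the filter moved from each trace_uprobe into the shared
trace_probe_event, the zero-length filter[0] array means kprobes pay nothing
while uprobes fold the filter into the same allocation as the event. The
idiom in a self-contained form (stub names, not the kernel types):

#include <stdlib.h>

struct event_stub {
        unsigned int flags;
        unsigned char filter[0];  /* optional trailing storage */
};

static struct event_stub *alloc_event(int want_filter, size_t filter_sz)
{
        size_t size = sizeof(struct event_stub);

        if (want_filter)
                size += filter_sz;

        /* one zeroed allocation covers the header plus the
         * optional tail, mirroring the kzalloc() in
         * trace_probe_init() above
         */
        return calloc(1, size);
}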
index 352073d..2619bc5 100644 (file)
@@ -34,12 +34,6 @@ struct uprobe_trace_entry_head {
 #define DATAOF_TRACE_ENTRY(entry, is_return)           \
        ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
 
-struct trace_uprobe_filter {
-       rwlock_t                rwlock;
-       int                     nr_systemwide;
-       struct list_head        perf_events;
-};
-
 static int trace_uprobe_create(int argc, const char **argv);
 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
 static int trace_uprobe_release(struct dyn_event *ev);
@@ -60,7 +54,6 @@ static struct dyn_event_operations trace_uprobe_ops = {
  */
 struct trace_uprobe {
        struct dyn_event                devent;
-       struct trace_uprobe_filter      filter;
        struct uprobe_consumer          consumer;
        struct path                     path;
        struct inode                    *inode;
@@ -351,7 +344,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
        if (!tu)
                return ERR_PTR(-ENOMEM);
 
-       ret = trace_probe_init(&tu->tp, event, group);
+       ret = trace_probe_init(&tu->tp, event, group, true);
        if (ret < 0)
                goto error;
 
@@ -359,7 +352,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
        tu->consumer.handler = uprobe_dispatcher;
        if (is_ret)
                tu->consumer.ret_handler = uretprobe_dispatcher;
-       init_trace_uprobe_filter(&tu->filter);
+       init_trace_uprobe_filter(tu->tp.event->filter);
        return tu;
 
 error:
@@ -1067,13 +1060,14 @@ static void __probe_event_disable(struct trace_probe *tp)
        struct trace_probe *pos;
        struct trace_uprobe *tu;
 
+       tu = container_of(tp, struct trace_uprobe, tp);
+       WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
+
        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
                tu = container_of(pos, struct trace_uprobe, tp);
                if (!tu->inode)
                        continue;
 
-               WARN_ON(!uprobe_filter_is_empty(&tu->filter));
-
                uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
                tu->inode = NULL;
        }
@@ -1108,7 +1102,7 @@ static int probe_event_enable(struct trace_event_call *call,
        }
 
        tu = container_of(tp, struct trace_uprobe, tp);
-       WARN_ON(!uprobe_filter_is_empty(&tu->filter));
+       WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
 
        if (enabled)
                return 0;
@@ -1205,39 +1199,39 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 }
 
 static inline bool
-uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
+trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
+                         struct perf_event *event)
 {
-       return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
+       return __uprobe_perf_filter(filter, event->hw.target->mm);
 }
 
-static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
+static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
+                                      struct perf_event *event)
 {
        bool done;
 
-       write_lock(&tu->filter.rwlock);
+       write_lock(&filter->rwlock);
        if (event->hw.target) {
                list_del(&event->hw.tp_list);
-               done = tu->filter.nr_systemwide ||
+               done = filter->nr_systemwide ||
                        (event->hw.target->flags & PF_EXITING) ||
-                       uprobe_filter_event(tu, event);
+                       trace_uprobe_filter_event(filter, event);
        } else {
-               tu->filter.nr_systemwide--;
-               done = tu->filter.nr_systemwide;
+               filter->nr_systemwide--;
+               done = filter->nr_systemwide;
        }
-       write_unlock(&tu->filter.rwlock);
+       write_unlock(&filter->rwlock);
 
-       if (!done)
-               return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
-
-       return 0;
+       return done;
 }
 
-static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
+/* This returns true if the filter always covers target mm */
+static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
+                                   struct perf_event *event)
 {
        bool done;
-       int err;
 
-       write_lock(&tu->filter.rwlock);
+       write_lock(&filter->rwlock);
        if (event->hw.target) {
                /*
                 * event->parent != NULL means copy_process(), we can avoid
@@ -1247,28 +1241,21 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
                 * attr.enable_on_exec means that exec/mmap will install the
                 * breakpoints we need.
                 */
-               done = tu->filter.nr_systemwide ||
+               done = filter->nr_systemwide ||
                        event->parent || event->attr.enable_on_exec ||
-                       uprobe_filter_event(tu, event);
-               list_add(&event->hw.tp_list, &tu->filter.perf_events);
+                       trace_uprobe_filter_event(filter, event);
+               list_add(&event->hw.tp_list, &filter->perf_events);
        } else {
-               done = tu->filter.nr_systemwide;
-               tu->filter.nr_systemwide++;
+               done = filter->nr_systemwide;
+               filter->nr_systemwide++;
        }
-       write_unlock(&tu->filter.rwlock);
+       write_unlock(&filter->rwlock);
 
-       err = 0;
-       if (!done) {
-               err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
-               if (err)
-                       uprobe_perf_close(tu, event);
-       }
-       return err;
+       return done;
 }
 
-static int uprobe_perf_multi_call(struct trace_event_call *call,
-                                 struct perf_event *event,
-               int (*op)(struct trace_uprobe *tu, struct perf_event *event))
+static int uprobe_perf_close(struct trace_event_call *call,
+                            struct perf_event *event)
 {
        struct trace_probe *pos, *tp;
        struct trace_uprobe *tu;
@@ -1278,25 +1265,59 @@ static int uprobe_perf_multi_call(struct trace_event_call *call,
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;
 
+       tu = container_of(tp, struct trace_uprobe, tp);
+       if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
+               return 0;
+
        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
                tu = container_of(pos, struct trace_uprobe, tp);
-               ret = op(tu, event);
+               ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
                if (ret)
                        break;
        }
 
        return ret;
 }
+
+static int uprobe_perf_open(struct trace_event_call *call,
+                           struct perf_event *event)
+{
+       struct trace_probe *pos, *tp;
+       struct trace_uprobe *tu;
+       int err = 0;
+
+       tp = trace_probe_primary_from_call(call);
+       if (WARN_ON_ONCE(!tp))
+               return -ENODEV;
+
+       tu = container_of(tp, struct trace_uprobe, tp);
+       if (trace_uprobe_filter_add(tu->tp.event->filter, event))
+               return 0;
+
+       list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+               err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+               if (err) {
+                       uprobe_perf_close(call, event);
+                       break;
+               }
+       }
+
+       return err;
+}
+
 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
                                enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 {
+       struct trace_uprobe_filter *filter;
        struct trace_uprobe *tu;
        int ret;
 
        tu = container_of(uc, struct trace_uprobe, consumer);
-       read_lock(&tu->filter.rwlock);
-       ret = __uprobe_perf_filter(&tu->filter, mm);
-       read_unlock(&tu->filter.rwlock);
+       filter = tu->tp.event->filter;
+
+       read_lock(&filter->rwlock);
+       ret = __uprobe_perf_filter(filter, mm);
+       read_unlock(&filter->rwlock);
 
        return ret;
 }
@@ -1419,10 +1440,10 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
                return 0;
 
        case TRACE_REG_PERF_OPEN:
-               return uprobe_perf_multi_call(event, data, uprobe_perf_open);
+               return uprobe_perf_open(event, data);
 
        case TRACE_REG_PERF_CLOSE:
-               return uprobe_perf_multi_call(event, data, uprobe_perf_close);
+               return uprobe_perf_close(event, data);
 
 #endif
        default:
index 93217d4..c20b1de 100644 (file)
@@ -223,7 +223,7 @@ KASAN_SANITIZE_stackdepot.o := n
 KCOV_INSTRUMENT_stackdepot.o := n
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
-              fdt_empty_tree.o
+              fdt_empty_tree.o fdt_addresses.o
 $(foreach file, $(libfdt_files), \
        $(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
 lib-$(CONFIG_LIBFDT) += $(libfdt_files)
diff --git a/lib/fdt_addresses.c b/lib/fdt_addresses.c
new file mode 100644 (file)
index 0000000..23610bc
--- /dev/null
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_addresses.c"
index fe5c413..f0b5a1d 100644 (file)
@@ -60,36 +60,43 @@ static int ptr_id(void *ptr)
  */
 static void *shadow_get(void *obj, unsigned long id)
 {
-       void *ret = klp_shadow_get(obj, id);
+       int **sv;
 
+       sv = klp_shadow_get(obj, id);
        pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n",
-               __func__, ptr_id(obj), id, ptr_id(ret));
+               __func__, ptr_id(obj), id, ptr_id(sv));
 
-       return ret;
+       return sv;
 }
 
 static void *shadow_alloc(void *obj, unsigned long id, size_t size,
                          gfp_t gfp_flags, klp_shadow_ctor_t ctor,
                          void *ctor_data)
 {
-       void *ret = klp_shadow_alloc(obj, id, size, gfp_flags, ctor,
-                                    ctor_data);
+       int **var = ctor_data;
+       int **sv;
+
+       sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var);
        pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
                __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
-               ptr_id(ctor_data), ptr_id(ret));
-       return ret;
+               ptr_id(*var), ptr_id(sv));
+
+       return sv;
 }
 
 static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
                                 gfp_t gfp_flags, klp_shadow_ctor_t ctor,
                                 void *ctor_data)
 {
-       void *ret = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor,
-                                           ctor_data);
+       int **var = ctor_data;
+       int **sv;
+
+       sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var);
        pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
                __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
-               ptr_id(ctor_data), ptr_id(ret));
-       return ret;
+               ptr_id(*var), ptr_id(sv));
+
+       return sv;
 }
 
 static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
@@ -110,58 +117,70 @@ static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
 /* Shadow variable constructor - remember simple pointer data */
 static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
 {
-       int **shadow_int = shadow_data;
-       *shadow_int = ctor_data;
+       int **sv = shadow_data;
+       int **var = ctor_data;
+
+       if (!var)
+               return -EINVAL;
+
+       *sv = *var;
        pr_info("%s: PTR%d -> PTR%d\n",
-               __func__, ptr_id(shadow_int), ptr_id(ctor_data));
+               __func__, ptr_id(sv), ptr_id(*var));
 
        return 0;
 }
 
 static void shadow_dtor(void *obj, void *shadow_data)
 {
+       int **sv = shadow_data;
+
        pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n",
-               __func__, ptr_id(obj), ptr_id(shadow_data));
+               __func__, ptr_id(obj), ptr_id(sv));
 }
 
 static int test_klp_shadow_vars_init(void)
 {
        void *obj                       = THIS_MODULE;
        int id                  = 0x1234;
-       size_t size             = sizeof(int *);
        gfp_t gfp_flags         = GFP_KERNEL;
 
        int var1, var2, var3, var4;
+       int *pv1, *pv2, *pv3, *pv4;
        int **sv1, **sv2, **sv3, **sv4;
 
-       void *ret;
+       int **sv;
+
+       pv1 = &var1;
+       pv2 = &var2;
+       pv3 = &var3;
+       pv4 = &var4;
 
        ptr_id(NULL);
-       ptr_id(&var1);
-       ptr_id(&var2);
-       ptr_id(&var3);
-       ptr_id(&var4);
+       ptr_id(pv1);
+       ptr_id(pv2);
+       ptr_id(pv3);
+       ptr_id(pv4);
 
        /*
         * With an empty shadow variable hash table, expect not to find
         * any matches.
         */
-       ret = shadow_get(obj, id);
-       if (!ret)
+       sv = shadow_get(obj, id);
+       if (!sv)
                pr_info("  got expected NULL result\n");
 
        /*
         * Allocate a few shadow variables with different <obj> and <id>.
         */
-       sv1 = shadow_alloc(obj, id, size, gfp_flags, shadow_ctor, &var1);
+       sv1 = shadow_alloc(obj, id, sizeof(pv1), gfp_flags, shadow_ctor, &pv1);
        if (!sv1)
                return -ENOMEM;
 
-       sv2 = shadow_alloc(obj + 1, id, size, gfp_flags, shadow_ctor, &var2);
+       sv2 = shadow_alloc(obj + 1, id, sizeof(pv2), gfp_flags, shadow_ctor, &pv2);
        if (!sv2)
                return -ENOMEM;
 
-       sv3 = shadow_alloc(obj, id + 1, size, gfp_flags, shadow_ctor, &var3);
+       sv3 = shadow_alloc(obj, id + 1, sizeof(pv3), gfp_flags, shadow_ctor, &pv3);
        if (!sv3)
                return -ENOMEM;
 
@@ -169,23 +188,23 @@ static int test_klp_shadow_vars_init(void)
         * Verify we can find our new shadow variables and that they point
         * to expected data.
         */
-       ret = shadow_get(obj, id);
-       if (!ret)
+       sv = shadow_get(obj, id);
+       if (!sv)
                return -EINVAL;
-       if (ret == sv1 && *sv1 == &var1)
+       if (sv == sv1 && *sv1 == pv1)
                pr_info("  got expected PTR%d -> PTR%d result\n",
                        ptr_id(sv1), ptr_id(*sv1));
 
-       ret = shadow_get(obj + 1, id);
-       if (!ret)
+       sv = shadow_get(obj + 1, id);
+       if (!sv)
                return -EINVAL;
-       if (ret == sv2 && *sv2 == &var2)
+       if (sv == sv2 && *sv2 == pv2)
                pr_info("  got expected PTR%d -> PTR%d result\n",
                        ptr_id(sv2), ptr_id(*sv2));
-       ret = shadow_get(obj, id + 1);
-       if (!ret)
+       sv = shadow_get(obj, id + 1);
+       if (!sv)
                return -EINVAL;
-       if (ret == sv3 && *sv3 == &var3)
+       if (sv == sv3 && *sv3 == pv3)
                pr_info("  got expected PTR%d -> PTR%d result\n",
                        ptr_id(sv3), ptr_id(*sv3));
 
@@ -193,14 +212,14 @@ static int test_klp_shadow_vars_init(void)
         * Allocate or get a few more, this time with the same <obj>, <id>.
         * The second invocation should return the same shadow var.
         */
-       sv4 = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4);
+       sv4 = shadow_get_or_alloc(obj + 2, id, sizeof(pv4), gfp_flags, shadow_ctor, &pv4);
        if (!sv4)
                return -ENOMEM;
 
-       ret = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4);
-       if (!ret)
+       sv = shadow_get_or_alloc(obj + 2, id, sizeof(pv4), gfp_flags, shadow_ctor, &pv4);
+       if (!sv)
                return -EINVAL;
-       if (ret == sv4 && *sv4 == &var4)
+       if (sv == sv4 && *sv4 == pv4)
                pr_info("  got expected PTR%d -> PTR%d result\n",
                        ptr_id(sv4), ptr_id(*sv4));
 
@@ -209,27 +228,27 @@ static int test_klp_shadow_vars_init(void)
         * longer find them.
         */
        shadow_free(obj, id, shadow_dtor);                      /* sv1 */
-       ret = shadow_get(obj, id);
-       if (!ret)
+       sv = shadow_get(obj, id);
+       if (!sv)
                pr_info("  got expected NULL result\n");
 
        shadow_free(obj + 1, id, shadow_dtor);                  /* sv2 */
-       ret = shadow_get(obj + 1, id);
-       if (!ret)
+       sv = shadow_get(obj + 1, id);
+       if (!sv)
                pr_info("  got expected NULL result\n");
 
        shadow_free(obj + 2, id, shadow_dtor);                  /* sv4 */
-       ret = shadow_get(obj + 2, id);
-       if (!ret)
+       sv = shadow_get(obj + 2, id);
+       if (!sv)
                pr_info("  got expected NULL result\n");
 
        /*
         * We should still find an <id+1> variable.
         */
-       ret = shadow_get(obj, id + 1);
-       if (!ret)
+       sv = shadow_get(obj, id + 1);
+       if (!sv)
                return -EINVAL;
-       if (ret == sv3 && *sv3 == &var3)
+       if (sv == sv3 && *sv3 == pv3)
                pr_info("  got expected PTR%d -> PTR%d result\n",
                        ptr_id(sv3), ptr_id(*sv3));
 
@@ -237,8 +256,8 @@ static int test_klp_shadow_vars_init(void)
         * Free all the <id+1> variables, too.
         */
        shadow_free_all(id + 1, shadow_dtor);                   /* sv3 */
-       ret = shadow_get(obj, id);
-       if (!ret)
+       sv = shadow_get(obj, id);
+       if (!sv)
                pr_info("  shadow_get() got expected NULL result\n");
 
 
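
One detail worth spelling out in this self-test rework: ctor_data is now the
address of a local pointer (&pv1 and friends) rather than the pointer value
itself, so the constructor dereferences it once and can reject NULL. The
contract in miniature, using the names from the hunks:

/* ctor_data points at the seed value; the shadow variable stores
 * a copy of what it points to, never ctor_data itself
 */
int **sv = shadow_data;
int **var = ctor_data;

if (!var)
        return -EINVAL;
*sv = *var;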
index dccb95a..706020b 100644 (file)
@@ -30,13 +30,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        unsigned long res = 0;
 
-       /*
-        * Truncate 'max' to the user-specified limit, so that
-        * we only have one limit we need to check in the loop
-        */
-       if (max > count)
-               max = count;
-
        if (IS_UNALIGNED(src, dst))
                goto byte_at_a_time;
 
@@ -114,6 +107,13 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
                unsigned long max = max_addr - src_addr;
                long retval;
 
+               /*
+                * Truncate 'max' to the user-specified limit, so that
+                * we only have one limit we need to check in the loop
+                */
+               if (max > count)
+                       max = count;
+
                kasan_check_write(dst, count);
                check_object_size(dst, count, false);
                if (user_access_begin(src, max)) {
index 6c0005d..41670d4 100644 (file)
@@ -26,13 +26,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
        unsigned long align, res = 0;
        unsigned long c;
 
-       /*
-        * Truncate 'max' to the user-specified limit, so that
-        * we only have one limit we need to check in the loop
-        */
-       if (max > count)
-               max = count;
-
        /*
         * Do everything aligned. But that means that we
         * need to also expand the maximum..
@@ -109,6 +102,13 @@ long strnlen_user(const char __user *str, long count)
                unsigned long max = max_addr - src_addr;
                long retval;
 
+               /*
+                * Truncate 'max' to the user-specified limit, so that
+                * we only have one limit we need to check in the loop
+                */
+               if (max > count)
+                       max = count;
+
                if (user_access_begin(str, max)) {
                        retval = do_strnlen_user(str, count, max);
                        user_access_end();
index 7df4f7f..55c14e8 100644 (file)
@@ -2,6 +2,7 @@
 /*
  * test_xarray.c: Test the XArray API
  * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2019-2020 Oracle
  * Author: Matthew Wilcox <willy@infradead.org>
  */
 
@@ -902,28 +903,34 @@ static noinline void check_store_iter(struct xarray *xa)
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_multi_find(struct xarray *xa)
+static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
 {
 #ifdef CONFIG_XARRAY_MULTI
+       unsigned long multi = 3 << order;
+       unsigned long next = 4 << order;
        unsigned long index;
 
-       xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
-       XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+       xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
+       XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
+       XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
 
        index = 0;
        XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
-                       xa_mk_value(12));
-       XA_BUG_ON(xa, index != 12);
-       index = 13;
+                       xa_mk_value(multi));
+       XA_BUG_ON(xa, index != multi);
+       index = multi + 1;
        XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
-                       xa_mk_value(12));
-       XA_BUG_ON(xa, (index < 12) || (index >= 16));
+                       xa_mk_value(multi));
+       XA_BUG_ON(xa, (index < multi) || (index >= next));
        XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
-                       xa_mk_value(16));
-       XA_BUG_ON(xa, index != 16);
-
-       xa_erase_index(xa, 12);
-       xa_erase_index(xa, 16);
+                       xa_mk_value(next));
+       XA_BUG_ON(xa, index != next);
+       XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
+       XA_BUG_ON(xa, index != next);
+
+       xa_erase_index(xa, multi);
+       xa_erase_index(xa, next);
+       xa_erase_index(xa, next + 1);
        XA_BUG_ON(xa, !xa_empty(xa));
 #endif
 }
@@ -1046,12 +1053,33 @@ static noinline void check_find_3(struct xarray *xa)
        xa_destroy(xa);
 }
 
+static noinline void check_find_4(struct xarray *xa)
+{
+       unsigned long index = 0;
+       void *entry;
+
+       xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+
+       entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+       XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
+
+       entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+       XA_BUG_ON(xa, entry);
+
+       xa_erase_index(xa, ULONG_MAX);
+}
+
 static noinline void check_find(struct xarray *xa)
 {
+       unsigned i;
+
        check_find_1(xa);
        check_find_2(xa);
        check_find_3(xa);
-       check_multi_find(xa);
+       check_find_4(xa);
+
+       for (i = 2; i < 10; i++)
+               check_multi_find_1(xa, i);
        check_multi_find_2(xa);
 }
 
@@ -1132,6 +1160,27 @@ static noinline void check_move_tiny(struct xarray *xa)
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_move_max(struct xarray *xa)
+{
+       XA_STATE(xas, xa, 0);
+
+       xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+       rcu_read_lock();
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+       rcu_read_unlock();
+
+       xas_set(&xas, 0);
+       rcu_read_lock();
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+       xas_pause(&xas);
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+       rcu_read_unlock();
+
+       xa_erase_index(xa, ULONG_MAX);
+       XA_BUG_ON(xa, !xa_empty(xa));
+}
+
 static noinline void check_move_small(struct xarray *xa, unsigned long idx)
 {
        XA_STATE(xas, xa, 0);
@@ -1240,6 +1289,7 @@ static noinline void check_move(struct xarray *xa)
        xa_destroy(xa);
 
        check_move_tiny(xa);
+       check_move_max(xa);
 
        for (i = 0; i < 16; i++)
                check_move_small(xa, 1UL << i);
index 1237c21..1d9fab7 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
  * XArray implementation
- * Copyright (c) 2017 Microsoft Corporation
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2018-2020 Oracle
  * Author: Matthew Wilcox <willy@infradead.org>
  */
 
@@ -967,6 +968,7 @@ void xas_pause(struct xa_state *xas)
        if (xas_invalid(xas))
                return;
 
+       xas->xa_node = XAS_RESTART;
        if (node) {
                unsigned int offset = xas->xa_offset;
                while (++offset < XA_CHUNK_SIZE) {
@@ -974,10 +976,11 @@ void xas_pause(struct xa_state *xas)
                                break;
                }
                xas->xa_index += (offset - xas->xa_offset) << node->shift;
+               if (xas->xa_index == 0)
+                       xas->xa_node = XAS_BOUNDS;
        } else {
                xas->xa_index++;
        }
-       xas->xa_node = XAS_RESTART;
 }
 EXPORT_SYMBOL_GPL(xas_pause);
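
The xas_pause() reordering closes an end-of-array wrap; a concrete
walk-through, as a comment against the new code:

/* Pausing on an entry at index ULONG_MAX bumps xa_index by one,
 * which wraps to 0. The state is now primed to XAS_RESTART up
 * front and demoted to XAS_BOUNDS once that wrap is detected, so
 * the next xas_find() terminates instead of silently restarting
 * from index 0. check_move_max() in test_xarray.c above exercises
 * exactly this sequence.
 */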
 
@@ -1079,13 +1082,15 @@ void *xas_find(struct xa_state *xas, unsigned long max)
 {
        void *entry;
 
-       if (xas_error(xas))
+       if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
                return NULL;
+       if (xas->xa_index > max)
+               return set_bounds(xas);
 
        if (!xas->xa_node) {
                xas->xa_index = 1;
                return set_bounds(xas);
-       } else if (xas_top(xas->xa_node)) {
+       } else if (xas->xa_node == XAS_RESTART) {
                entry = xas_load(xas);
                if (entry || xas_not_node(xas->xa_node))
                        return entry;
@@ -1150,6 +1155,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
 
        if (xas_error(xas))
                return NULL;
+       if (xas->xa_index > max)
+               goto max;
 
        if (!xas->xa_node) {
                xas->xa_index = 1;
@@ -1824,6 +1831,17 @@ void *xa_find(struct xarray *xa, unsigned long *indexp,
 }
 EXPORT_SYMBOL(xa_find);
 
+static bool xas_sibling(struct xa_state *xas)
+{
+       struct xa_node *node = xas->xa_node;
+       unsigned long mask;
+
+       if (!node)
+               return false;
+       mask = (XA_CHUNK_SIZE << node->shift) - 1;
+       return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
+}
+
 /**
  * xa_find_after() - Search the XArray for a present entry.
  * @xa: XArray.
@@ -1847,21 +1865,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
        XA_STATE(xas, xa, *indexp + 1);
        void *entry;
 
+       if (xas.xa_index == 0)
+               return NULL;
+
        rcu_read_lock();
        for (;;) {
                if ((__force unsigned int)filter < XA_MAX_MARKS)
                        entry = xas_find_marked(&xas, max, filter);
                else
                        entry = xas_find(&xas, max);
-               if (xas.xa_node == XAS_BOUNDS)
+
+               if (xas_invalid(&xas))
                        break;
-               if (xas.xa_shift) {
-                       if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
-                               continue;
-               } else {
-                       if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
-                               continue;
-               }
+               if (xas_sibling(&xas))
+                       continue;
                if (!xas_retry(&xas, entry))
                        break;
        }
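
xas_sibling() condenses the open-coded shift/offset checks into one mask
comparison. A worked instance, assuming XA_CHUNK_SIZE of 64:

/* A node with shift == 6 spans 64 << 6 == 4096 indices, so
 * mask == 4095. Suppose the walk found an entry at slot 2: its
 * canonical start within the node is 2 << 6 == 128. A requested
 * xa_index whose in-node position is, say, 200 satisfies
 * 200 > 128, meaning the index points into the middle of that
 * multi-order entry, so xa_find_after() keeps searching rather
 * than reporting the same entry again.
 */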
index d79221f..c318967 100644 (file)
@@ -134,8 +134,7 @@ static void vcc_seq_stop(struct seq_file *seq, void *v)
 static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        v = vcc_walk(seq, 1);
-       if (v)
-               (*pos)++;
+       (*pos)++;
        return v;
 }
 
index 76bd678..a0116b9 100644 (file)
@@ -62,7 +62,7 @@ static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
        hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
 
        if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
-               pr_warn("Headroom to small\n");
+               pr_warn("Headroom too small\n");
                kfree_skb(skb);
                return -EIO;
        }
index 7e885d0..81befd0 100644 (file)
@@ -5491,9 +5491,29 @@ static void flush_all_backlogs(void)
        put_online_cpus();
 }
 
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static void gro_normal_list(struct napi_struct *napi)
+{
+       if (!napi->rx_count)
+               return;
+       netif_receive_skb_list_internal(&napi->rx_list);
+       INIT_LIST_HEAD(&napi->rx_list);
+       napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+{
+       list_add_tail(&skb->list, &napi->rx_list);
+       if (++napi->rx_count >= gro_normal_batch)
+               gro_normal_list(napi);
+}
+
 INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
 INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
-static int napi_gro_complete(struct sk_buff *skb)
+static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
 {
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
@@ -5526,7 +5546,8 @@ static int napi_gro_complete(struct sk_buff *skb)
        }
 
 out:
-       return netif_receive_skb_internal(skb);
+       gro_normal_one(napi, skb);
+       return NET_RX_SUCCESS;
 }
 
 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
@@ -5539,7 +5560,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
                if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
                        return;
                skb_list_del_init(skb);
-               napi_gro_complete(skb);
+               napi_gro_complete(napi, skb);
                napi->gro_hash[index].count--;
        }
 
@@ -5641,7 +5662,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
        }
 }
 
-static void gro_flush_oldest(struct list_head *head)
+static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
 {
        struct sk_buff *oldest;
 
@@ -5657,7 +5678,7 @@ static void gro_flush_oldest(struct list_head *head)
         * SKB to the chain.
         */
        skb_list_del_init(oldest);
-       napi_gro_complete(oldest);
+       napi_gro_complete(napi, oldest);
 }
 
 INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
@@ -5733,7 +5754,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 
        if (pp) {
                skb_list_del_init(pp);
-               napi_gro_complete(pp);
+               napi_gro_complete(napi, pp);
                napi->gro_hash[hash].count--;
        }
 
@@ -5744,7 +5765,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                goto normal;
 
        if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
-               gro_flush_oldest(gro_head);
+               gro_flush_oldest(napi, gro_head);
        } else {
                napi->gro_hash[hash].count++;
        }
@@ -5802,26 +5823,6 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
-       if (!napi->rx_count)
-               return;
-       netif_receive_skb_list_internal(&napi->rx_list);
-       INIT_LIST_HEAD(&napi->rx_list);
-       napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
-{
-       list_add_tail(&skb->list, &napi->rx_list);
-       if (++napi->rx_count >= gro_normal_batch)
-               gro_normal_list(napi);
-}
-
 static void napi_skb_free_stolen_head(struct sk_buff *skb)
 {
        skb_dst_drop(skb);
@@ -6200,8 +6201,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                                 NAPIF_STATE_IN_BUSY_POLL)))
                return false;
 
-       gro_normal_list(n);
-
        if (n->gro_bitmask) {
                unsigned long timeout = 0;
 
@@ -6217,6 +6216,9 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                        hrtimer_start(&n->timer, ns_to_ktime(timeout),
                                      HRTIMER_MODE_REL_PINNED);
        }
+
+       gro_normal_list(n);
+
        if (unlikely(!list_empty(&n->poll_list))) {
                /* If n->poll_list is not empty, we need to mask irqs */
                local_irq_save(flags);
@@ -6548,8 +6550,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
                goto out_unlock;
        }
 
-       gro_normal_list(n);
-
        if (n->gro_bitmask) {
                /* flush too old packets
                 * If HZ < 1000, flush all packets.
@@ -6557,6 +6557,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
                napi_gro_flush(n, HZ >= 1000);
        }
 
+       gro_normal_list(n);
+
        /* Some drivers may have called napi_schedule
         * prior to exhausting their budget.
         */
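
Two related moves in this net/core/dev.c diff: napi_gro_complete() now feeds
finished GRO super-SKBs into the gro_normal_one() batch instead of pushing
them up one at a time (which is why the helpers migrated above it), and both
napi_complete_done() and napi_poll() call gro_normal_list() after the
gro_bitmask flush rather than before. The resulting order, schematically:

if (n->gro_bitmask)
        napi_gro_flush(n, HZ >= 1000);  /* may append SKBs to
                                         * n->rx_list via
                                         * napi_gro_complete() */
gro_normal_list(n);                     /* one listified pass up
                                         * the stack             */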
@@ -8194,6 +8196,22 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(__dev_set_mtu);
 
+int dev_validate_mtu(struct net_device *dev, int new_mtu,
+                    struct netlink_ext_ack *extack)
+{
+       /* MTU must be positive, and in range */
+       if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+               NL_SET_ERR_MSG(extack, "mtu less than device minimum");
+               return -EINVAL;
+       }
+
+       if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+               NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
+               return -EINVAL;
+       }
+       return 0;
+}
+
 /**
  *     dev_set_mtu_ext - Change maximum transfer unit
  *     @dev: device
@@ -8210,16 +8228,9 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
        if (new_mtu == dev->mtu)
                return 0;
 
-       /* MTU must be positive, and in range */
-       if (new_mtu < 0 || new_mtu < dev->min_mtu) {
-               NL_SET_ERR_MSG(extack, "mtu less than device minimum");
-               return -EINVAL;
-       }
-
-       if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
-               NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
-               return -EINVAL;
-       }
+       err = dev_validate_mtu(dev, new_mtu, extack);
+       if (err)
+               return err;
 
        if (!netif_device_present(dev))
                return -ENODEV;
@@ -9302,8 +9313,10 @@ int register_netdevice(struct net_device *dev)
                goto err_uninit;
 
        ret = netdev_register_kobject(dev);
-       if (ret)
+       if (ret) {
+               dev->reg_state = NETREG_UNREGISTERED;
                goto err_uninit;
+       }
        dev->reg_state = NETREG_REGISTERED;
 
        __netdev_update_features(dev);
index 920784a..789a73a 100644 (file)
@@ -3290,6 +3290,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu+1;
                return per_cpu_ptr(tbl->stats, cpu);
        }
+       (*pos)++;
        return NULL;
 }
 
index 02916f4..d9001b5 100644 (file)
@@ -3048,8 +3048,17 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
        dev->rtnl_link_ops = ops;
        dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
 
-       if (tb[IFLA_MTU])
-               dev->mtu = nla_get_u32(tb[IFLA_MTU]);
+       if (tb[IFLA_MTU]) {
+               u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+               int err;
+
+               err = dev_validate_mtu(dev, mtu, extack);
+               if (err) {
+                       free_netdev(dev);
+                       return ERR_PTR(err);
+               }
+               dev->mtu = mtu;
+       }
        if (tb[IFLA_ADDRESS]) {
                memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
                                nla_len(tb[IFLA_ADDRESS]));
index 3866d7e..ded2d52 100644 (file)
@@ -594,8 +594,6 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy);
 
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 {
-       sock_owned_by_me(sk);
-
        sk_psock_cork_free(psock);
        sk_psock_zap_ingress(psock);
 
index 6b6e51d..1f31a39 100644 (file)
@@ -438,6 +438,23 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(inet_proto_csum_replace4);
 
+/**
+ * inet_proto_csum_replace16 - update layer 4 header checksum field
+ * @sum: Layer 4 header checksum field
+ * @skb: sk_buff for the packet
+ * @from: old IPv6 address
+ * @to: new IPv6 address
+ * @pseudohdr: True if layer 4 header checksum includes pseudoheader
+ *
+ * Update the layer 4 header checksum to match a change of IPv6 src/dst
+ * address.
+ *
+ * There is no need to update skb->csum in this function: the address
+ * change and the matching update of the L4 checksum field contribute
+ * equal and opposite diffs, so they cancel in the skb->csum calculation.
+ * inet_proto_csum_replace4(), by contrast, must update skb->csum, because
+ * there three fields change (IPv4 src/dst address, IPv4 header checksum
+ * and the L4 checksum field) and their combined diff equals the L4
+ * checksum update rather than cancelling out.
+ */
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
                               const __be32 *from, const __be32 *to,
                               bool pseudohdr)
@@ -449,9 +466,6 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                *sum = csum_fold(csum_partial(diff, sizeof(diff),
                                 ~csum_unfold(*sum)));
-               if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
-                       skb->csum = ~csum_partial(diff, sizeof(diff),
-                                                 ~skb->csum);
        } else if (pseudohdr)
                *sum = ~csum_fold(csum_partial(diff, sizeof(diff),
                                  csum_unfold(*sum)));
index d40de84..754d84b 100644 (file)
@@ -191,7 +191,7 @@ void hsr_debugfs_term(struct hsr_priv *priv);
 void hsr_debugfs_create_root(void);
 void hsr_debugfs_remove_root(void);
 #else
-static inline void void hsr_debugfs_rename(struct net_device *dev)
+static inline void hsr_debugfs_rename(struct net_device *dev)
 {
 }
 static inline void hsr_debugfs_init(struct hsr_priv *priv,
index 0e4a7cf..e2e219c 100644 (file)
@@ -57,6 +57,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
                if (!x)
                        goto out_reset;
 
+               skb->mark = xfrm_smark_get(skb->mark, x);
+
                sp->xvec[sp->len++] = x;
                sp->olen++;
 
index 30fa771..dcc79ff 100644 (file)
@@ -662,8 +662,8 @@ static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
        [FOU_ATTR_REMCSUM_NOPARTIAL]    = { .type = NLA_FLAG, },
        [FOU_ATTR_LOCAL_V4]             = { .type = NLA_U32, },
        [FOU_ATTR_PEER_V4]              = { .type = NLA_U32, },
-       [FOU_ATTR_LOCAL_V6]             = { .type = sizeof(struct in6_addr), },
-       [FOU_ATTR_PEER_V6]              = { .type = sizeof(struct in6_addr), },
+       [FOU_ATTR_LOCAL_V6]             = { .len = sizeof(struct in6_addr), },
+       [FOU_ATTR_PEER_V6]              = { .len = sizeof(struct in6_addr), },
        [FOU_ATTR_PEER_PORT]            = { .type = NLA_U16, },
        [FOU_ATTR_IFINDEX]              = { .type = NLA_S32, },
 };
index 0fe2a5d..74e1d96 100644 (file)
@@ -1236,10 +1236,8 @@ int ip_tunnel_init(struct net_device *dev)
        iph->version            = 4;
        iph->ihl                = 5;
 
-       if (tunnel->collect_md) {
-               dev->features |= NETIF_F_NETNS_LOCAL;
+       if (tunnel->collect_md)
                netif_keep_dst(dev);
-       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init);
index e90b600..37cddd1 100644 (file)
@@ -187,8 +187,17 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        int mtu;
 
        if (!dst) {
-               dev->stats.tx_carrier_errors++;
-               goto tx_error_icmp;
+               struct rtable *rt;
+
+               fl->u.ip4.flowi4_oif = dev->ifindex;
+               fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+               rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+               if (IS_ERR(rt)) {
+                       dev->stats.tx_carrier_errors++;
+                       goto tx_error_icmp;
+               }
+               dst = &rt->dst;
+               skb_dst_set(skb, dst);
        }
 
        dst_hold(dst);
index 87e979f..e356ea7 100644 (file)
@@ -271,6 +271,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu+1;
                return &per_cpu(rt_cache_stat, cpu);
        }
+       (*pos)++;
        return NULL;
 
 }
index d885ba8..a7d766e 100644 (file)
@@ -2524,6 +2524,7 @@ static void tcp_rtx_queue_purge(struct sock *sk)
 {
        struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
 
+       tcp_sk(sk)->highest_sack = NULL;
        while (p) {
                struct sk_buff *skb = rb_to_skb(p);
 
@@ -2614,7 +2615,6 @@ int tcp_disconnect(struct sock *sk, int flags)
        WRITE_ONCE(tp->write_seq, seq);
 
        icsk->icsk_backoff = 0;
-       tp->snd_cwnd = 2;
        icsk->icsk_probes_out = 0;
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
index a6545ef..6c4d79b 100644 (file)
@@ -779,8 +779,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
         * bandwidth sample. Delivered is in packets and interval_us in uS and
         * ratio will be <<1 for most connections. So delivered is first scaled.
         */
-       bw = (u64)rs->delivered * BW_UNIT;
-       do_div(bw, rs->interval_us);
+       bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
 
        /* If this sample is application-limited, it is likely to have a very
         * low delivered count that represents application behavior rather than
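
do_div() is a 64-by-32 division that rewrites its dividend in place, so a long divisor such as rs->interval_us was being truncated to u32; div64_long() accepts the divisor at its natural width and returns the quotient as an ordinary expression. Roughly, with a stand-in scaling constant (DEMO_BW_UNIT is made up for illustration):

#include <linux/types.h>
#include <linux/math64.h>

#define DEMO_BW_UNIT (1 << 24)		/* stand-in for tcp_bbr's BW_UNIT */

static u64 demo_bw(u32 delivered, long interval_us)
{
	/* do_div(bw, interval_us) would truncate the divisor to u32 and
	 * modify bw in place; div64_long() keeps the divisor's width.
	 */
	return div64_long((u64)delivered * DEMO_BW_UNIT, interval_us);
}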
index 5347ab2..2a976f5 100644 (file)
@@ -3164,6 +3164,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                        tp->retransmit_skb_hint = NULL;
                if (unlikely(skb == tp->lost_skb_hint))
                        tp->lost_skb_hint = NULL;
+               tcp_highest_sack_replace(sk, skb, next);
                tcp_rtx_queue_unlink_and_free(skb, sk);
        }
 
index 58c92a7..b62b59b 100644 (file)
@@ -3232,6 +3232,7 @@ int tcp_send_synack(struct sock *sk)
                        if (!nskb)
                                return -ENOMEM;
                        INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
+                       tcp_highest_sack_replace(sk, skb, nskb);
                        tcp_rtx_queue_unlink_and_free(skb, sk);
                        __skb_header_release(nskb);
                        tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
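
Both TCP hunks enforce the same invariant: tp->highest_sack caches a pointer to an skb inside the retransmit queue, so any path that unlinks and frees such an skb must first repoint or clear the cache, or later SACK processing dereferences freed memory. The discipline generalizes to any cached cursor into a container; a small sketch with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head list;
};

struct demo_queue {
	struct list_head head;
	struct demo_node *cursor;	/* cached position, like tp->highest_sack */
};

/* Free a node without leaving the cached cursor dangling: move it to
 * the successor first, or clear it at the end of the list.
 */
static void demo_free_node(struct demo_queue *q, struct demo_node *n)
{
	if (q->cursor == n)
		q->cursor = list_is_last(&n->list, &q->head) ?
			    NULL : list_next_entry(n, list);
	list_del(&n->list);
	kfree(n);
}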
index 93a355b..030d43c 100644 (file)
@@ -1368,7 +1368,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
        if (likely(partial)) {
                up->forward_deficit += size;
                size = up->forward_deficit;
-               if (size < (sk->sk_rcvbuf >> 2))
+               if (size < (sk->sk_rcvbuf >> 2) &&
+                   !skb_queue_empty(&up->reader_queue))
                        return;
        } else {
                size += up->forward_deficit;
index e31626f..fd53505 100644 (file)
@@ -79,6 +79,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
                if (!x)
                        goto out_reset;
 
+               skb->mark = xfrm_smark_get(skb->mark, x);
+
                sp->xvec[sp->len++] = x;
                sp->olen++;
 
index 7bae6a9..cfae0a1 100644 (file)
@@ -2495,14 +2495,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        struct net *net = seq_file_net(seq);
        struct ipv6_route_iter *iter = seq->private;
 
+       ++(*pos);
        if (!v)
                goto iter_table;
 
        n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
-       if (n) {
-               ++*pos;
+       if (n)
                return n;
-       }
 
 iter_table:
        ipv6_route_check_sernum(iter);
@@ -2510,8 +2509,6 @@ iter_table:
        r = fib6_walk_continue(&iter->w);
        spin_unlock_bh(&iter->tbl->tb6_lock);
        if (r > 0) {
-               if (v)
-                       ++*pos;
                return iter->w.leaf;
        } else if (r < 0) {
                fib6_walker_unlink(net, &iter->w);
index ee968d9..55bfc51 100644 (file)
@@ -1466,7 +1466,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
                dev->mtu -= 8;
 
        if (tunnel->parms.collect_md) {
-               dev->features |= NETIF_F_NETNS_LOCAL;
                netif_keep_dst(dev);
        }
        ip6gre_tnl_init_features(dev);
@@ -1894,7 +1893,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
 
-       dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
@@ -2197,7 +2195,6 @@ static void ip6erspan_tap_setup(struct net_device *dev)
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
 
-       dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
index 2f376db..b5dd20c 100644 (file)
@@ -1877,10 +1877,8 @@ static int ip6_tnl_dev_init(struct net_device *dev)
        if (err)
                return err;
        ip6_tnl_link_config(t);
-       if (t->parms.collect_md) {
-               dev->features |= NETIF_F_NETNS_LOCAL;
+       if (t->parms.collect_md)
                netif_keep_dst(dev);
-       }
        return 0;
 }
 
index 6f08b76..524006a 100644 (file)
@@ -449,8 +449,17 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        int err = -1;
        int mtu;
 
-       if (!dst)
-               goto tx_err_link_failure;
+       if (!dst) {
+               fl->u.ip6.flowi6_oif = dev->ifindex;
+               fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+               dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+               if (dst->error) {
+                       dst_release(dst);
+                       dst = NULL;
+                       goto tx_err_link_failure;
+               }
+               skb_dst_set(skb, dst);
+       }
 
        dst_hold(dst);
        dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
index 85a5447..7cbc197 100644 (file)
@@ -23,6 +23,7 @@
 #include <net/addrconf.h>
 #include <net/ip6_route.h>
 #include <net/dst_cache.h>
+#include <net/ip_tunnels.h>
 #ifdef CONFIG_IPV6_SEG6_HMAC
 #include <net/seg6_hmac.h>
 #endif
@@ -135,7 +136,8 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
 
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
-       skb->encapsulation = 0;
+       if (iptunnel_pull_offloads(skb))
+               return false;
 
        return true;
 }
index 077a2cb..26ab0e9 100644 (file)
@@ -75,7 +75,7 @@ mtype_flush(struct ip_set *set)
 
        if (set->extensions & IPSET_EXT_DESTROY)
                mtype_ext_cleanup(set);
-       memset(map->members, 0, map->memsize);
+       bitmap_zero(map->members, map->elements);
        set->elements = 0;
        set->ext_size = 0;
 }
index abe8f77..0a2196f 100644 (file)
@@ -37,7 +37,7 @@ MODULE_ALIAS("ip_set_bitmap:ip");
 
 /* Type structure */
 struct bitmap_ip {
-       void *members;          /* the set members */
+       unsigned long *members; /* the set members */
        u32 first_ip;           /* host byte order, included in range */
        u32 last_ip;            /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
@@ -220,7 +220,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
            u32 first_ip, u32 last_ip,
            u32 elements, u32 hosts, u8 netmask)
 {
-       map->members = ip_set_alloc(map->memsize);
+       map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
        if (!map->members)
                return false;
        map->first_ip = first_ip;
@@ -322,7 +322,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        if (!map)
                return -ENOMEM;
 
-       map->memsize = bitmap_bytes(0, elements - 1);
+       map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_ip;
        if (!init_map_ip(set, map, first_ip, last_ip,
                         elements, hosts, netmask)) {
index b618713..739e343 100644 (file)
@@ -42,7 +42,7 @@ enum {
 
 /* Type structure */
 struct bitmap_ipmac {
-       void *members;          /* the set members */
+       unsigned long *members; /* the set members */
        u32 first_ip;           /* host byte order, included in range */
        u32 last_ip;            /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
@@ -299,7 +299,7 @@ static bool
 init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
               u32 first_ip, u32 last_ip, u32 elements)
 {
-       map->members = ip_set_alloc(map->memsize);
+       map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
        if (!map->members)
                return false;
        map->first_ip = first_ip;
@@ -360,7 +360,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        if (!map)
                return -ENOMEM;
 
-       map->memsize = bitmap_bytes(0, elements - 1);
+       map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_ipmac;
        if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
                kfree(map);
index 23d6095..b49978d 100644 (file)
@@ -30,7 +30,7 @@ MODULE_ALIAS("ip_set_bitmap:port");
 
 /* Type structure */
 struct bitmap_port {
-       void *members;          /* the set members */
+       unsigned long *members; /* the set members */
        u16 first_port;         /* host byte order, included in range */
        u16 last_port;          /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
@@ -231,7 +231,7 @@ static bool
 init_map_port(struct ip_set *set, struct bitmap_port *map,
              u16 first_port, u16 last_port)
 {
-       map->members = ip_set_alloc(map->memsize);
+       map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
        if (!map->members)
                return false;
        map->first_port = first_port;
@@ -271,7 +271,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
                return -ENOMEM;
 
        map->elements = elements;
-       map->memsize = bitmap_bytes(0, map->elements);
+       map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_port;
        if (!init_map_port(set, map, first_port, last_port)) {
                kfree(map);
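
All three bitmap set types get the same conversion: the members storage is manipulated through the kernel's bitmap helpers, which read and write whole unsigned longs, so it must also be allocated, cleared, and accounted in whole longs; the old bitmap_bytes()/memset() pairing sized the buffer in raw bytes and could let the helpers touch past its end. A condensed sketch of the pattern (all names hypothetical):

#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_set {
	unsigned long *members;		/* bitmap-backed membership */
	u32 elements;			/* number of bits */
	size_t memsize;
};

static int demo_set_init(struct demo_set *s, u32 elements)
{
	s->members = bitmap_zalloc(elements, GFP_KERNEL);
	if (!s->members)
		return -ENOMEM;
	s->elements = elements;
	/* account in the same units the helpers operate on */
	s->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
	return 0;
}

static void demo_set_flush(struct demo_set *s)
{
	bitmap_zero(s->members, s->elements);
}

static void demo_set_destroy(struct demo_set *s)
{
	bitmap_free(s->members);
}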
index 8dc892a..605e0f6 100644 (file)
@@ -1239,7 +1239,7 @@ static void ip_vs_process_message(struct netns_ipvs *ipvs, __u8 *buffer,
 
                        p = msg_end;
                        if (p + sizeof(s->v4) > buffer+buflen) {
-                               IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n");
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n");
                                return;
                        }
                        s = (union ip_vs_sync_conn *)p;
index 0399ae8..4f897b1 100644 (file)
@@ -114,7 +114,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
        {
 /*     ORIGINAL        */
 /*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
-/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
 /* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
 /* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
 /* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
@@ -130,7 +130,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
 /*     REPLY   */
 /*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
 /* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
-/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
 /* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
 /* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
 /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
@@ -316,7 +316,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
                        ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
                }
 
-               ct->proto.sctp.state = new_state;
+               ct->proto.sctp.state = SCTP_CONNTRACK_NONE;
        }
 
        return true;
index 65f51a2..7e63b48 100644 (file)
@@ -552,48 +552,71 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
 
 static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
 
+static const struct nft_chain_type *
+__nft_chain_type_get(u8 family, enum nft_chain_types type)
+{
+       if (family >= NFPROTO_NUMPROTO ||
+           type >= NFT_CHAIN_T_MAX)
+               return NULL;
+
+       return chain_type[family][type];
+}
+
 static const struct nft_chain_type *
 __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
 {
+       const struct nft_chain_type *type;
        int i;
 
        for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
-               if (chain_type[family][i] != NULL &&
-                   !nla_strcmp(nla, chain_type[family][i]->name))
-                       return chain_type[family][i];
+               type = __nft_chain_type_get(family, i);
+               if (!type)
+                       continue;
+               if (!nla_strcmp(nla, type->name))
+                       return type;
        }
        return NULL;
 }
 
-/*
- * Loading a module requires dropping mutex that guards the transaction.
- * A different client might race to start a new transaction meanwhile. Zap the
- * list of pending transaction and then restore it once the mutex is grabbed
- * again. Users of this function return EAGAIN which implicitly triggers the
- * transaction abort path to clean up the list of pending transactions.
- */
+struct nft_module_request {
+       struct list_head        list;
+       char                    module[MODULE_NAME_LEN];
+       bool                    done;
+};
+
 #ifdef CONFIG_MODULES
-static void nft_request_module(struct net *net, const char *fmt, ...)
+static int nft_request_module(struct net *net, const char *fmt, ...)
 {
        char module_name[MODULE_NAME_LEN];
-       LIST_HEAD(commit_list);
+       struct nft_module_request *req;
        va_list args;
        int ret;
 
-       list_splice_init(&net->nft.commit_list, &commit_list);
-
        va_start(args, fmt);
        ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
        va_end(args);
        if (ret >= MODULE_NAME_LEN)
-               return;
+               return 0;
 
-       mutex_unlock(&net->nft.commit_mutex);
-       request_module("%s", module_name);
-       mutex_lock(&net->nft.commit_mutex);
+       list_for_each_entry(req, &net->nft.module_list, list) {
+               if (!strcmp(req->module, module_name)) {
+                       if (req->done)
+                               return 0;
 
-       WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
-       list_splice(&commit_list, &net->nft.commit_list);
+                       /* A request to load this module already exists. */
+                       return -EAGAIN;
+               }
+       }
+
+       req = kmalloc(sizeof(*req), GFP_KERNEL);
+       if (!req)
+               return -ENOMEM;
+
+       req->done = false;
+       strlcpy(req->module, module_name, MODULE_NAME_LEN);
+       list_add_tail(&req->list, &net->nft.module_list);
+
+       return -EAGAIN;
 }
 #endif
 
@@ -617,10 +640,9 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (autoload) {
-               nft_request_module(net, "nft-chain-%u-%.*s", family,
-                                  nla_len(nla), (const char *)nla_data(nla));
-               type = __nf_tables_chain_type_lookup(nla, family);
-               if (type != NULL)
+               if (nft_request_module(net, "nft-chain-%u-%.*s", family,
+                                      nla_len(nla),
+                                      (const char *)nla_data(nla)) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -1162,11 +1184,8 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
 
 void nft_register_chain_type(const struct nft_chain_type *ctype)
 {
-       if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO))
-               return;
-
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) {
+       if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) {
                nfnl_unlock(NFNL_SUBSYS_NFTABLES);
                return;
        }
@@ -1768,7 +1787,10 @@ static int nft_chain_parse_hook(struct net *net,
        hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
        hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
 
-       type = chain_type[family][NFT_CHAIN_T_DEFAULT];
+       type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT);
+       if (!type)
+               return -EOPNOTSUPP;
+
        if (nla[NFTA_CHAIN_TYPE]) {
                type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
                                                   family, autoload);
@@ -2328,9 +2350,8 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
 static int nft_expr_type_request_module(struct net *net, u8 family,
                                        struct nlattr *nla)
 {
-       nft_request_module(net, "nft-expr-%u-%.*s", family,
-                          nla_len(nla), (char *)nla_data(nla));
-       if (__nft_expr_type_get(family, nla))
+       if (nft_request_module(net, "nft-expr-%u-%.*s", family,
+                              nla_len(nla), (char *)nla_data(nla)) == -EAGAIN)
                return -EAGAIN;
 
        return 0;
@@ -2356,9 +2377,9 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
                if (nft_expr_type_request_module(net, family, nla) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
 
-               nft_request_module(net, "nft-expr-%.*s",
-                                  nla_len(nla), (char *)nla_data(nla));
-               if (__nft_expr_type_get(family, nla))
+               if (nft_request_module(net, "nft-expr-%.*s",
+                                      nla_len(nla),
+                                      (char *)nla_data(nla)) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -2449,9 +2470,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
                        err = PTR_ERR(ops);
 #ifdef CONFIG_MODULES
                        if (err == -EAGAIN)
-                               nft_expr_type_request_module(ctx->net,
-                                                            ctx->family,
-                                                            tb[NFTA_EXPR_NAME]);
+                               if (nft_expr_type_request_module(ctx->net,
+                                                                ctx->family,
+                                                                tb[NFTA_EXPR_NAME]) != -EAGAIN)
+                                       err = -ENOENT;
 #endif
                        goto err1;
                }
@@ -3288,8 +3310,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (list_empty(&nf_tables_set_types)) {
-               nft_request_module(ctx->net, "nft-set");
-               if (!list_empty(&nf_tables_set_types))
+               if (nft_request_module(ctx->net, "nft-set") == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -5415,8 +5436,7 @@ nft_obj_type_get(struct net *net, u32 objtype)
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (type == NULL) {
-               nft_request_module(net, "nft-obj-%u", objtype);
-               if (__nft_obj_type_get(objtype))
+               if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -5989,8 +6009,7 @@ nft_flowtable_type_get(struct net *net, u8 family)
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (type == NULL) {
-               nft_request_module(net, "nf-flowtable-%u", family);
-               if (__nft_flowtable_type_get(family))
+               if (nft_request_module(net, "nf-flowtable-%u", family) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -6992,6 +7011,18 @@ static void nft_chain_del(struct nft_chain *chain)
        list_del_rcu(&chain->list);
 }
 
+static void nf_tables_module_autoload_cleanup(struct net *net)
+{
+       struct nft_module_request *req, *next;
+
+       WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
+       list_for_each_entry_safe(req, next, &net->nft.module_list, list) {
+               WARN_ON_ONCE(!req->done);
+               list_del(&req->list);
+               kfree(req);
+       }
+}
+
 static void nf_tables_commit_release(struct net *net)
 {
        struct nft_trans *trans;
@@ -7004,6 +7035,7 @@ static void nf_tables_commit_release(struct net *net)
         * to prevent expensive synchronize_rcu() in commit phase.
         */
        if (list_empty(&net->nft.commit_list)) {
+               nf_tables_module_autoload_cleanup(net);
                mutex_unlock(&net->nft.commit_mutex);
                return;
        }
@@ -7018,6 +7050,7 @@ static void nf_tables_commit_release(struct net *net)
        list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
        spin_unlock(&nf_tables_destroy_list_lock);
 
+       nf_tables_module_autoload_cleanup(net);
        mutex_unlock(&net->nft.commit_mutex);
 
        schedule_work(&trans_destroy_work);
@@ -7209,6 +7242,26 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
        return 0;
 }
 
+static void nf_tables_module_autoload(struct net *net)
+{
+       struct nft_module_request *req, *next;
+       LIST_HEAD(module_list);
+
+       list_splice_init(&net->nft.module_list, &module_list);
+       mutex_unlock(&net->nft.commit_mutex);
+       list_for_each_entry_safe(req, next, &module_list, list) {
+               if (req->done) {
+                       list_del(&req->list);
+                       kfree(req);
+               } else {
+                       request_module("%s", req->module);
+                       req->done = true;
+               }
+       }
+       mutex_lock(&net->nft.commit_mutex);
+       list_splice(&module_list, &net->nft.module_list);
+}
+
 static void nf_tables_abort_release(struct nft_trans *trans)
 {
        switch (trans->msg_type) {
@@ -7238,7 +7291,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
        kfree(trans);
 }
 
-static int __nf_tables_abort(struct net *net)
+static int __nf_tables_abort(struct net *net, bool autoload)
 {
        struct nft_trans *trans, *next;
        struct nft_trans_elem *te;
@@ -7360,6 +7413,11 @@ static int __nf_tables_abort(struct net *net)
                nf_tables_abort_release(trans);
        }
 
+       if (autoload)
+               nf_tables_module_autoload(net);
+       else
+               nf_tables_module_autoload_cleanup(net);
+
        return 0;
 }
 
@@ -7368,9 +7426,9 @@ static void nf_tables_cleanup(struct net *net)
        nft_validate_state_update(net, NFT_VALIDATE_SKIP);
 }
 
-static int nf_tables_abort(struct net *net, struct sk_buff *skb)
+static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
 {
-       int ret = __nf_tables_abort(net);
+       int ret = __nf_tables_abort(net, autoload);
 
        mutex_unlock(&net->nft.commit_mutex);
 
@@ -7965,6 +8023,7 @@ static int __net_init nf_tables_init_net(struct net *net)
 {
        INIT_LIST_HEAD(&net->nft.tables);
        INIT_LIST_HEAD(&net->nft.commit_list);
+       INIT_LIST_HEAD(&net->nft.module_list);
        mutex_init(&net->nft.commit_mutex);
        net->nft.base_seq = 1;
        net->nft.validate_state = NFT_VALIDATE_SKIP;
@@ -7976,7 +8035,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
 {
        mutex_lock(&net->nft.commit_mutex);
        if (!list_empty(&net->nft.commit_list))
-               __nf_tables_abort(net);
+               __nf_tables_abort(net, false);
        __nft_release_tables(net);
        mutex_unlock(&net->nft.commit_mutex);
        WARN_ON_ONCE(!list_empty(&net->nft.tables));
index a9ea29a..2bb2848 100644 (file)
@@ -564,7 +564,7 @@ static void nft_indr_block_cb(struct net_device *dev,
 
        mutex_lock(&net->nft.commit_mutex);
        chain = __nft_offload_get_chain(dev);
-       if (chain) {
+       if (chain && chain->flags & NFT_CHAIN_HW_OFFLOAD) {
                struct nft_base_chain *basechain;
 
                basechain = nft_base_chain(chain);
index 4abbb45..99127e2 100644 (file)
@@ -476,7 +476,7 @@ ack:
        }
 done:
        if (status & NFNL_BATCH_REPLAY) {
-               ss->abort(net, oskb);
+               ss->abort(net, oskb, true);
                nfnl_err_reset(&err_list);
                kfree_skb(skb);
                module_put(ss->owner);
@@ -487,11 +487,11 @@ done:
                        status |= NFNL_BATCH_REPLAY;
                        goto done;
                } else if (err) {
-                       ss->abort(net, oskb);
+                       ss->abort(net, oskb, false);
                        netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
                }
        } else {
-               ss->abort(net, oskb);
+               ss->abort(net, oskb, false);
        }
        if (ss->cleanup)
                ss->cleanup(net);
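
Taken together, the nf_tables and nfnetlink hunks retire the drop-the-mutex autoload scheme described in the deleted comment block: nft_request_module() now only records a deduplicated request on net->nft.module_list and returns -EAGAIN, the batch aborts, nfnetlink replays it via ss->abort(net, oskb, true), and nf_tables_module_autoload() issues the real request_module() calls with the commit mutex released, marking each entry done so the replayed batch can make progress. The queueing half of that contract, distilled into a self-contained sketch (names hypothetical):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

struct demo_module_req {
	struct list_head list;
	char name[56];
	bool done;			/* loaded on an earlier replay pass */
};

/* Returns 0 when a previous replay already serviced the request,
 * -EAGAIN when a request is (now) queued and the caller must abort
 * and replay the batch, -ENOMEM on allocation failure.
 */
static int demo_request_module(struct list_head *pending, const char *name)
{
	struct demo_module_req *req;

	list_for_each_entry(req, pending, list) {
		if (!strcmp(req->name, name))
			return req->done ? 0 : -EAGAIN;
	}

	req = kmalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->done = false;
	strscpy(req->name, name, sizeof(req->name));
	list_add_tail(&req->list, pending);
	return -EAGAIN;
}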
index f54d6ae..b42247a 100644 (file)
@@ -61,6 +61,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
        int err;
        u8 ttl;
 
+       if (!tb[NFTA_OSF_DREG])
+               return -EINVAL;
+
        if (tb[NFTA_OSF_TTL]) {
                ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
                if (ttl > 2)
index 46b8ff2..1e8eeb0 100644 (file)
@@ -1475,7 +1475,7 @@ static int __init rose_proto_init(void)
        int rc;
 
        if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
-               printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n");
+               printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
                rc = -EINVAL;
                goto out;
        }
index 76e0d12..c2cdd0f 100644 (file)
@@ -2055,9 +2055,8 @@ replay:
                                                               &chain_info));
 
                mutex_unlock(&chain->filter_chain_lock);
-               tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
-                                         protocol, prio, chain, rtnl_held,
-                                         extack);
+               tp_new = tcf_proto_create(name, protocol, prio, chain,
+                                         rtnl_held, extack);
                if (IS_ERR(tp_new)) {
                        err = PTR_ERR(tp_new);
                        goto errout_tp;
index 8f2ad70..d0140a9 100644 (file)
@@ -263,12 +263,12 @@ static int tcf_em_validate(struct tcf_proto *tp,
                                }
                                em->data = (unsigned long) v;
                        }
+                       em->datalen = data_len;
                }
        }
 
        em->matchid = em_hdr->matchid;
        em->flags = em_hdr->flags;
-       em->datalen = data_len;
        em->net = net;
 
        err = 0;
index 7ac1542..dc651a6 100644 (file)
@@ -268,9 +268,6 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        int err = -1;
        int mtu;
 
-       if (!dst)
-               goto tx_err_link_failure;
-
        dst_hold(dst);
        dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
        if (IS_ERR(dst)) {
@@ -297,7 +294,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
        mtu = dst_mtu(dst);
        if (!skb->ignore_df && skb->len > mtu) {
-               skb_dst_update_pmtu(skb, mtu);
+               skb_dst_update_pmtu_no_confirm(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        if (mtu < IPV6_MIN_MTU)
@@ -343,6 +340,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xfrm_if *xi = netdev_priv(dev);
        struct net_device_stats *stats = &xi->dev->stats;
+       struct dst_entry *dst = skb_dst(skb);
        struct flowi fl;
        int ret;
 
@@ -352,10 +350,33 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
        case htons(ETH_P_IPV6):
                xfrm_decode_session(skb, &fl, AF_INET6);
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+               if (!dst) {
+                       fl.u.ip6.flowi6_oif = dev->ifindex;
+                       fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+                       dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
+                       if (dst->error) {
+                               dst_release(dst);
+                               stats->tx_carrier_errors++;
+                               goto tx_err;
+                       }
+                       skb_dst_set(skb, dst);
+               }
                break;
        case htons(ETH_P_IP):
                xfrm_decode_session(skb, &fl, AF_INET);
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+               if (!dst) {
+                       struct rtable *rt;
+
+                       fl.u.ip4.flowi4_oif = dev->ifindex;
+                       fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+                       rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
+                       if (IS_ERR(rt)) {
+                               stats->tx_carrier_errors++;
+                               goto tx_err;
+                       }
+                       skb_dst_set(skb, &rt->dst);
+               }
                break;
        default:
                goto tx_err;
@@ -563,12 +584,9 @@ static void xfrmi_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &xfrmi_netdev_ops;
        dev->type               = ARPHRD_NONE;
-       dev->hard_header_len    = ETH_HLEN;
-       dev->min_header_len     = ETH_HLEN;
        dev->mtu                = ETH_DATA_LEN;
        dev->min_mtu            = ETH_MIN_MTU;
-       dev->max_mtu            = ETH_DATA_LEN;
-       dev->addr_len           = ETH_ALEN;
+       dev->max_mtu            = IP_MAX_MTU;
        dev->flags              = IFF_NOARP;
        dev->needs_free_netdev  = true;
        dev->priv_destructor    = xfrmi_dev_free;
index e89ca45..918ce17 100644 (file)
@@ -52,17 +52,21 @@ struct dummy {
  */
 static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data)
 {
-       void **shadow_leak = shadow_data;
-       void *leak = ctor_data;
+       int **shadow_leak = shadow_data;
+       int **leak = ctor_data;
 
-       *shadow_leak = leak;
+       if (!ctor_data)
+               return -EINVAL;
+
+       *shadow_leak = *leak;
        return 0;
 }
 
 static struct dummy *livepatch_fix1_dummy_alloc(void)
 {
        struct dummy *d;
-       void *leak;
+       int *leak;
+       int **shadow_leak;
 
        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
@@ -76,25 +80,34 @@ static struct dummy *livepatch_fix1_dummy_alloc(void)
         * variable.  A patched dummy_free routine can later fetch this
         * pointer to handle resource release.
         */
-       leak = kzalloc(sizeof(int), GFP_KERNEL);
-       if (!leak) {
-               kfree(d);
-               return NULL;
+       leak = kzalloc(sizeof(*leak), GFP_KERNEL);
+       if (!leak)
+               goto err_leak;
+
+       shadow_leak = klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
+                                      shadow_leak_ctor, &leak);
+       if (!shadow_leak) {
+               pr_err("%s: failed to allocate shadow variable for the leaking pointer: dummy @ %p, leak @ %p\n",
+                      __func__, d, leak);
+               goto err_shadow;
        }
 
-       klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
-                        shadow_leak_ctor, leak);
-
        pr_info("%s: dummy @ %p, expires @ %lx\n",
                __func__, d, d->jiffies_expire);
 
        return d;
+
+err_shadow:
+       kfree(leak);
+err_leak:
+       kfree(d);
+       return NULL;
 }
 
 static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data)
 {
        void *d = obj;
-       void **shadow_leak = shadow_data;
+       int **shadow_leak = shadow_data;
 
        kfree(*shadow_leak);
        pr_info("%s: dummy @ %p, prevented leak @ %p\n",
@@ -103,7 +116,7 @@ static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data)
 
 static void livepatch_fix1_dummy_free(struct dummy *d)
 {
-       void **shadow_leak;
+       int **shadow_leak;
 
        /*
         * Patch: fetch the saved SV_LEAK shadow variable, detach and
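
klp_shadow_alloc() attaches a freshly allocated shadow area of the given size to obj and invokes the ctor with shadow_data pointing at that area and ctor_data passed through verbatim. The shadow area in this sample holds a single int pointer, so both ctor arguments are effectively int **: the ctor has to dereference ctor_data, which is why the fixed allocation site passes &leak and the ctor now rejects a NULL ctor_data. A minimal sketch of the same shape (SV_DEMO and the function names are hypothetical):

#include <linux/livepatch.h>
#include <linux/slab.h>
#include <linux/errno.h>

#define SV_DEMO 1			/* hypothetical shadow variable id */

static int demo_shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	int **shadow = shadow_data;	/* the new shadow area */
	int **payload = ctor_data;	/* what the caller handed in */

	if (!payload)
		return -EINVAL;

	*shadow = *payload;
	return 0;
}

static int demo_attach(void *obj, int *payload)
{
	int **shadow;

	shadow = klp_shadow_alloc(obj, SV_DEMO, sizeof(payload), GFP_KERNEL,
				  demo_shadow_ctor, &payload);
	return shadow ? 0 : -ENOMEM;
}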
index 50d223b..29fe5cd 100644 (file)
@@ -59,7 +59,7 @@ static bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies)
 static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data)
 {
        void *d = obj;
-       void **shadow_leak = shadow_data;
+       int **shadow_leak = shadow_data;
 
        kfree(*shadow_leak);
        pr_info("%s: dummy @ %p, prevented leak @ %p\n",
@@ -68,7 +68,7 @@ static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data)
 
 static void livepatch_fix2_dummy_free(struct dummy *d)
 {
-       void **shadow_leak;
+       int **shadow_leak;
        int *shadow_count;
 
        /* Patch: copy the memory leak patch from the fix1 module. */
index ecfe83a..7e753b0 100644 (file)
@@ -95,7 +95,7 @@ struct dummy {
 static __used noinline struct dummy *dummy_alloc(void)
 {
        struct dummy *d;
-       void *leak;
+       int *leak;
 
        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
@@ -105,7 +105,7 @@ static __used noinline struct dummy *dummy_alloc(void)
                msecs_to_jiffies(1000 * EXPIRE_PERIOD);
 
        /* Oops, forgot to save leak! */
-       leak = kzalloc(sizeof(int), GFP_KERNEL);
+       leak = kzalloc(sizeof(*leak), GFP_KERNEL);
        if (!leak) {
                kfree(d);
                return NULL;
index d4adfbe..9d07e59 100644 (file)
@@ -31,6 +31,10 @@ cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /de
 # Return y if the linker supports <flag>, n otherwise
 ld-option = $(success,$(LD) -v $(1))
 
+# $(as-instr,<instr>)
+# Return y if the assembler supports <instr>, n otherwise
+as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)
+
 # check if $(CC) and $(LD) exist
 $(error-if,$(failure,command -v $(CC)),compiler '$(CC)' not found)
 $(error-if,$(failure,command -v $(LD)),linker '$(LD)' not found)
index 612268e..7225107 100644 (file)
 #define R_AARCH64_ABS64        257
 #endif
 
+#define R_ARM_PC24             1
+#define R_ARM_THM_CALL         10
+#define R_ARM_CALL             28
+
 static int fd_map;     /* File descriptor for file being modified. */
 static int mmap_failed; /* Boolean flag. */
 static char gpfx;      /* prefix for global symbol name (sometimes '_') */
@@ -418,6 +422,18 @@ static char const *already_has_rel_mcount = "success"; /* our work here is done!
 #define RECORD_MCOUNT_64
 #include "recordmcount.h"
 
+static int arm_is_fake_mcount(Elf32_Rel const *rp)
+{
+       switch (ELF32_R_TYPE(w(rp->r_info))) {
+       case R_ARM_THM_CALL:
+       case R_ARM_CALL:
+       case R_ARM_PC24:
+               return 0;
+       }
+
+       return 1;
+}
+
 /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
  * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
  * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
@@ -523,6 +539,7 @@ static int do_file(char const *const fname)
                altmcount = "__gnu_mcount_nc";
                make_nop = make_nop_arm;
                rel_type_nop = R_ARM_NONE;
+               is_fake_mcount32 = arm_is_fake_mcount;
                gpfx = 0;
                break;
        case EM_AARCH64:
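
ELF32_R_TYPE() extracts the relocation type from the low byte of r_info (the upper bytes carry the symbol index); on 32-bit ARM only the BL/BLX-style call relocations mark genuine mcount call sites, so any other relocation that happens to reference the mcount symbol is now treated as fake and skipped. The same filter in plain userspace C; glibc's <elf.h> spells the Thumb relocation R_ARM_THM_PC22, hence the local define, mirroring what the patch itself does (recordmcount additionally byte-swaps r_info via w() for cross-endian objects, omitted here):

#include <elf.h>
#include <stdbool.h>

#ifndef R_ARM_THM_CALL
#define R_ARM_THM_CALL 10	/* glibc names this R_ARM_THM_PC22 */
#endif

static bool is_real_arm_mcount_call(const Elf32_Rel *rel)
{
	switch (ELF32_R_TYPE(rel->r_info)) {	/* low byte of r_info */
	case R_ARM_PC24:	/* old-style BL */
	case R_ARM_CALL:	/* BL/BLX */
	case R_ARM_THM_CALL:	/* Thumb BL/BLX */
		return true;
	default:
		return false;
	}
}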
index 944183f..2b2b816 100644 (file)
@@ -15,7 +15,7 @@ struct process_cmd_struct {
        int arg;
 };
 
-static const char *version_str = "v1.1";
+static const char *version_str = "v1.2";
 static const int supported_api_ver = 1;
 static struct isst_if_platform_info isst_platform_info;
 static char *progname;
@@ -1384,14 +1384,10 @@ static void set_pbf_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                goto disp_result;
        }
 
-       if (auto_mode) {
-               if (status) {
-                       ret = set_pbf_core_power(cpu);
-                       if (ret)
-                               goto disp_result;
-               } else {
-                       isst_pm_qos_config(cpu, 0, 0);
-               }
+       if (auto_mode && status) {
+               ret = set_pbf_core_power(cpu);
+               if (ret)
+                       goto disp_result;
        }
 
        ret = isst_set_pbf_fact_status(cpu, 1, status);
@@ -1408,6 +1404,9 @@ static void set_pbf_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                }
        }
 
+       if (auto_mode && !status)
+               isst_pm_qos_config(cpu, 0, 0);
+
 disp_result:
        if (status)
                isst_display_result(cpu, outf, "base-freq", "enable",
@@ -1496,14 +1495,10 @@ static void set_fact_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
        int ret;
        int status = *(int *)arg4;
 
-       if (auto_mode) {
-               if (status) {
-                       ret = isst_pm_qos_config(cpu, 1, 1);
-                       if (ret)
-                               goto disp_results;
-               } else {
-                       isst_pm_qos_config(cpu, 0, 0);
-               }
+       if (auto_mode && status) {
+               ret = isst_pm_qos_config(cpu, 1, 1);
+               if (ret)
+                       goto disp_results;
        }
 
        ret = isst_set_pbf_fact_status(cpu, 0, status);
@@ -1524,6 +1519,9 @@ static void set_fact_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                        ret = isst_set_trl(cpu, fact_trl);
                if (ret && auto_mode)
                        isst_pm_qos_config(cpu, 0, 0);
+       } else {
+               if (auto_mode)
+                       isst_pm_qos_config(cpu, 0, 0);
        }
 
 disp_results:
@@ -1638,7 +1636,7 @@ static void set_fact_enable(int arg)
                        if (ret)
                                goto error_disp;
                }
-               isst_display_result(i, outf, "turbo-freq --auto", "enable", 0);
+               isst_display_result(-1, outf, "turbo-freq --auto", "enable", 0);
        }
 
        return;
index d14c7bc..81a119f 100644 (file)
@@ -6,6 +6,44 @@
 
 #include "isst.h"
 
+int isst_write_pm_config(int cpu, int cp_state)
+{
+       unsigned int req, resp;
+       int ret;
+
+       if (cp_state)
+               req = BIT(16);
+       else
+               req = 0;
+
+       ret = isst_send_mbox_command(cpu, WRITE_PM_CONFIG, PM_FEATURE, 0, req,
+                                    &resp);
+       if (ret)
+               return ret;
+
+       debug_printf("cpu:%d WRITE_PM_CONFIG resp:%x\n", cpu, resp);
+
+       return 0;
+}
+
+int isst_read_pm_config(int cpu, int *cp_state, int *cp_cap)
+{
+       unsigned int resp;
+       int ret;
+
+       ret = isst_send_mbox_command(cpu, READ_PM_CONFIG, PM_FEATURE, 0, 0,
+                                    &resp);
+       if (ret)
+               return ret;
+
+       debug_printf("cpu:%d READ_PM_CONFIG resp:%x\n", cpu, resp);
+
+       *cp_state = resp & BIT(16);
+       *cp_cap = resp & BIT(0) ? 1 : 0;
+
+       return 0;
+}
+
 int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev)
 {
        unsigned int resp;
@@ -36,6 +74,7 @@ int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev)
 int isst_get_ctdp_control(int cpu, int config_index,
                          struct isst_pkg_ctdp_level_info *ctdp_level)
 {
+       int cp_state, cp_cap;
        unsigned int resp;
        int ret;
 
@@ -50,6 +89,15 @@ int isst_get_ctdp_control(int cpu, int config_index,
        ctdp_level->fact_enabled = !!(resp & BIT(16));
        ctdp_level->pbf_enabled = !!(resp & BIT(17));
 
+       ret = isst_read_pm_config(cpu, &cp_state, &cp_cap);
+       if (ret) {
+               debug_printf("cpu:%d pm_config is not supported\n", cpu);
+       } else {
+               debug_printf("cpu:%d pm_config SST-CP state:%d cap:%d\n", cpu, cp_state, cp_cap);
+               ctdp_level->sst_cp_support = cp_cap;
+               ctdp_level->sst_cp_enabled = cp_state;
+       }
+
        debug_printf(
                "cpu:%d CONFIG_TDP_GET_TDP_CONTROL resp:%x fact_support:%d pbf_support: %d fact_enabled:%d pbf_enabled:%d\n",
                cpu, resp, ctdp_level->fact_support, ctdp_level->pbf_support,
@@ -779,6 +827,13 @@ int isst_pm_qos_config(int cpu, int enable_clos, int priority_type)
                        debug_printf("Turbo-freq feature must be disabled first\n");
                        return -EINVAL;
                }
+               ret = isst_write_pm_config(cpu, 0);
+               if (ret)
+                       perror("isst_write_pm_config\n");
+       } else {
+               ret = isst_write_pm_config(cpu, 1);
+               if (ret)
+                       perror("isst_write_pm_config\n");
        }
 
        ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
index 040dd09..4fb0c1d 100644 (file)
@@ -418,6 +418,17 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
                        snprintf(value, sizeof(value), "unsupported");
                format_and_print(outf, base_level + 4, header, value);
 
+               snprintf(header, sizeof(header),
+                        "speed-select-core-power");
+               if (ctdp_level->sst_cp_support) {
+                       if (ctdp_level->sst_cp_enabled)
+                               snprintf(value, sizeof(value), "enabled");
+                       else
+                               snprintf(value, sizeof(value), "disabled");
+               } else
+                       snprintf(value, sizeof(value), "unsupported");
+               format_and_print(outf, base_level + 4, header, value);
+
                if (is_clx_n_platform()) {
                        if (ctdp_level->pbf_support)
                                _isst_pbf_display_information(cpu, outf,
@@ -634,13 +645,15 @@ void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
        char header[256];
        char value[256];
 
-       snprintf(header, sizeof(header), "package-%d",
-                get_physical_package_id(cpu));
-       format_and_print(outf, 1, header, NULL);
-       snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
-       format_and_print(outf, 2, header, NULL);
-       snprintf(header, sizeof(header), "cpu-%d", cpu);
-       format_and_print(outf, 3, header, NULL);
+       if (cpu >= 0) {
+               snprintf(header, sizeof(header), "package-%d",
+                        get_physical_package_id(cpu));
+               format_and_print(outf, 1, header, NULL);
+               snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
+               format_and_print(outf, 2, header, NULL);
+               snprintf(header, sizeof(header), "cpu-%d", cpu);
+               format_and_print(outf, 3, header, NULL);
+       }
        snprintf(header, sizeof(header), "%s", feature);
        format_and_print(outf, 4, header, NULL);
        snprintf(header, sizeof(header), "%s", cmd);
index cdf0f8a..ad5aa63 100644 (file)
 #define PM_CLOS_OFFSET                         0x08
 #define PQR_ASSOC_OFFSET                       0x20
 
+#define READ_PM_CONFIG                         0x94
+#define WRITE_PM_CONFIG                                0x95
+#define PM_FEATURE                             0x03
+
 #define DISP_FREQ_MULTIPLIER 100
 
 struct isst_clos_config {
@@ -119,6 +123,8 @@ struct isst_pkg_ctdp_level_info {
        int pbf_support;
        int fact_enabled;
        int pbf_enabled;
+       int sst_cp_support;
+       int sst_cp_enabled;
        int tdp_ratio;
        int active;
        int tdp_control;